/*
 * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
 * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
 *
 * MMX-optimized DSP functions, based on H.264 optimizations by
 * Michael Niedermayer and Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/cavsdsp.h"
#include "libavcodec/idctdsp.h"
#include "constants.h"
#include "fpel.h"
#include "idctdsp.h"
#include "config.h"

#if HAVE_MMX_INLINE

/* in/out: mma = mma + mmb, mmb = mmb - mma */
#define SUMSUB_BA(a, b) \
    "paddw "#b", "#a" \n\t" \
    "paddw "#b", "#b" \n\t" \
    "psubw "#a", "#b" \n\t"

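/* Illustrative scalar equivalent of SUMSUB_BA (a hypothetical helper, not
 * part of FFmpeg): per 16-bit lane, the three instructions above leave
 * a+b in the first register and b-a in the second. */
static av_unused void sumsub_ba_ref(int16_t *a, int16_t *b)
{
    *a += *b;   /* a' = a + b                  (paddw b, a) */
    *b += *b;   /* b  = 2*b                    (paddw b, b) */
    *b -= *a;   /* b' = 2*b - (a+b) = b - a    (psubw a, b) */
}
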
/*****************************************************************************
 *
 * inverse transform
 *
 ****************************************************************************/

static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
    __asm__ volatile(
        "movq 112(%0), %%mm4   \n\t" /* mm4 = src7 */
        "movq  16(%0), %%mm5   \n\t" /* mm5 = src1 */
        "movq  80(%0), %%mm2   \n\t" /* mm2 = src5 */
        "movq  48(%0), %%mm7   \n\t" /* mm7 = src3 */
        "movq   %%mm4, %%mm0   \n\t"
        "movq   %%mm5, %%mm3   \n\t"
        "movq   %%mm2, %%mm6   \n\t"
        "movq   %%mm7, %%mm1   \n\t"

        "paddw  %%mm4, %%mm4   \n\t" /* mm4 = 2*src7 */
        "paddw  %%mm3, %%mm3   \n\t" /* mm3 = 2*src1 */
        "paddw  %%mm6, %%mm6   \n\t" /* mm6 = 2*src5 */
        "paddw  %%mm1, %%mm1   \n\t" /* mm1 = 2*src3 */
        "paddw  %%mm4, %%mm0   \n\t" /* mm0 = 3*src7 */
        "paddw  %%mm3, %%mm5   \n\t" /* mm5 = 3*src1 */
        "paddw  %%mm6, %%mm2   \n\t" /* mm2 = 3*src5 */
        "paddw  %%mm1, %%mm7   \n\t" /* mm7 = 3*src3 */
        "psubw  %%mm4, %%mm5   \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
        "paddw  %%mm6, %%mm7   \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
        "psubw  %%mm2, %%mm1   \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
        "paddw  %%mm0, %%mm3   \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */

        "movq   %%mm5, %%mm4   \n\t"
        "movq   %%mm7, %%mm6   \n\t"
        "movq   %%mm3, %%mm0   \n\t"
        "movq   %%mm1, %%mm2   \n\t"
        SUMSUB_BA(%%mm7, %%mm5)      /* mm7 = a0 + a1  mm5 = a0 - a1 */
        "paddw  %%mm3, %%mm7   \n\t" /* mm7 = a0 + a1 + a3 */
        "paddw  %%mm1, %%mm5   \n\t" /* mm5 = a0 - a1 + a2 */
        "paddw  %%mm7, %%mm7   \n\t"
        "paddw  %%mm5, %%mm5   \n\t"
        "paddw  %%mm6, %%mm7   \n\t" /* mm7 = b4 */
        "paddw  %%mm4, %%mm5   \n\t" /* mm5 = b5 */

        SUMSUB_BA(%%mm1, %%mm3)      /* mm1 = a3 + a2  mm3 = a3 - a2 */
        "psubw  %%mm1, %%mm4   \n\t" /* mm4 = a0 - a2 - a3 */
        "movq   %%mm4, %%mm1   \n\t" /* mm1 = a0 - a2 - a3 */
        "psubw  %%mm6, %%mm3   \n\t" /* mm3 = a3 - a2 - a1 */
        "paddw  %%mm1, %%mm1   \n\t"
        "paddw  %%mm3, %%mm3   \n\t"
        "psubw  %%mm2, %%mm1   \n\t" /* mm1 = b7 */
        "paddw  %%mm0, %%mm3   \n\t" /* mm3 = b6 */

        "movq  32(%0), %%mm2   \n\t" /* mm2 = src2 */
        "movq  96(%0), %%mm6   \n\t" /* mm6 = src6 */
        "movq   %%mm2, %%mm4   \n\t"
        "movq   %%mm6, %%mm0   \n\t"
        "psllw     $2, %%mm4   \n\t" /* mm4 = 4*src2 */
        "psllw     $2, %%mm6   \n\t" /* mm6 = 4*src6 */
        "paddw  %%mm4, %%mm2   \n\t" /* mm2 = 5*src2 */
        "paddw  %%mm6, %%mm0   \n\t" /* mm0 = 5*src6 */
        "paddw  %%mm2, %%mm2   \n\t"
        "paddw  %%mm0, %%mm0   \n\t"
        "psubw  %%mm0, %%mm4   \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
        "paddw  %%mm2, %%mm6   \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */

        "movq    (%0), %%mm2   \n\t" /* mm2 = src0 */
        "movq  64(%0), %%mm0   \n\t" /* mm0 = src4 */
        SUMSUB_BA(%%mm0, %%mm2)      /* mm0 = src0 + src4  mm2 = src0 - src4 */
        "psllw     $3, %%mm0   \n\t"
        "psllw     $3, %%mm2   \n\t"
        "paddw     %1, %%mm0   \n\t" /* add rounding bias */
        "paddw     %1, %%mm2   \n\t" /* add rounding bias */

        SUMSUB_BA(%%mm6, %%mm0)      /* mm6 = a4 + a6  mm0 = a4 - a6 */
        SUMSUB_BA(%%mm4, %%mm2)      /* mm4 = a5 + a7  mm2 = a5 - a7 */
        SUMSUB_BA(%%mm7, %%mm6)      /* mm7 = dst0  mm6 = dst7 */
        SUMSUB_BA(%%mm5, %%mm4)      /* mm5 = dst1  mm4 = dst6 */
        SUMSUB_BA(%%mm3, %%mm2)      /* mm3 = dst2  mm2 = dst5 */
        SUMSUB_BA(%%mm1, %%mm0)      /* mm1 = dst3  mm0 = dst4 */
        :: "r"(block), "m"(bias)
    );
}

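/* For reference, a scalar sketch (hypothetical, not part of FFmpeg) of the
 * same 1-D pass, following the register comments above; src is one column
 * of eight coefficients and bias is the rounding term folded into the even
 * part. The MMX version computes this in 16-bit lanes, so wraparound
 * behavior can differ from plain int arithmetic. */
static av_unused void cavs_idct8_1d_ref(int16_t dst[8], const int16_t src[8],
                                        int bias)
{
    const int a0 = 3 * src[1] - 2 * src[7];
    const int a1 = 3 * src[3] + 2 * src[5];
    const int a2 = 2 * src[3] - 3 * src[5];
    const int a3 = 2 * src[1] + 3 * src[7];

    const int b4 = 2 * (a0 + a1 + a3) + a1;
    const int b5 = 2 * (a0 - a1 + a2) + a0;
    const int b6 = 2 * (a3 - a2 - a1) + a3;
    const int b7 = 2 * (a0 - a2 - a3) - a2;

    const int a4 = ((src[0] + src[4]) << 3) + bias;
    const int a5 = ((src[0] - src[4]) << 3) + bias;
    const int a6 = 10 * src[2] +  4 * src[6];
    const int a7 =  4 * src[2] - 10 * src[6];

    dst[0] = a4 + a6 + b4;  dst[7] = a4 + a6 - b4;
    dst[1] = a5 + a7 + b5;  dst[6] = a5 + a7 - b5;
    dst[2] = a5 - a7 + b6;  dst[5] = a5 - a7 - b6;
    dst[3] = a4 - a6 + b7;  dst[4] = a4 - a6 - b7;
}
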
#define SBUTTERFLY(a,b,t,n,m)\
    "mov" #m " " #a ", " #t "     \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd,q)  /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd,q)  /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq,q)  /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq,q)  /* t=cgko c=dhlp */

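/* What TRANSPOSE4 achieves, as a scalar sketch (hypothetical helper): two
 * rounds of the punpckl/punpckh interleave amount to a 4x4 transpose of
 * 16-bit words, as the letter diagrams in the comments trace. */
static av_unused void transpose4x4_ref(int16_t m[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = i + 1; j < 4; j++) {
            int16_t t = m[i][j];
            m[i][j]   = m[j][i];
            m[j][i]   = t;
        }
}
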
static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, ptrdiff_t stride)
{
    int i;
    LOCAL_ALIGNED(16, int16_t, b2, [64]);

    for (i = 0; i < 2; i++) {
        cavs_idct8_1d(block + 4 * i, ff_pw_4.a);

        __asm__ volatile(
            "psraw     $3, %%mm7    \n\t"
            "psraw     $3, %%mm6    \n\t"
            "psraw     $3, %%mm5    \n\t"
            "psraw     $3, %%mm4    \n\t"
            "psraw     $3, %%mm3    \n\t"
            "psraw     $3, %%mm2    \n\t"
            "psraw     $3, %%mm1    \n\t"
            "psraw     $3, %%mm0    \n\t"
            "movq   %%mm7,    (%0)  \n\t"
            TRANSPOSE4(%%mm0, %%mm2, %%mm4, %%mm6, %%mm7)
            "movq   %%mm0,   8(%0)  \n\t"
            "movq   %%mm6,  24(%0)  \n\t"
            "movq   %%mm7,  40(%0)  \n\t"
            "movq   %%mm4,  56(%0)  \n\t"
            "movq    (%0), %%mm7    \n\t"
            TRANSPOSE4(%%mm7, %%mm5, %%mm3, %%mm1, %%mm0)
            "movq   %%mm7,    (%0)  \n\t"
            "movq   %%mm1,  16(%0)  \n\t"
            "movq   %%mm0,  32(%0)  \n\t"
            "movq   %%mm3,  48(%0)  \n\t"
            :
            : "r"(b2 + 32 * i)
            : "memory"
        );
    }

    for (i = 0; i < 2; i++) {
        cavs_idct8_1d(b2 + 4 * i, ff_pw_64.a);

        __asm__ volatile(
            "psraw     $7, %%mm7    \n\t"
            "psraw     $7, %%mm6    \n\t"
            "psraw     $7, %%mm5    \n\t"
            "psraw     $7, %%mm4    \n\t"
            "psraw     $7, %%mm3    \n\t"
            "psraw     $7, %%mm2    \n\t"
            "psraw     $7, %%mm1    \n\t"
            "psraw     $7, %%mm0    \n\t"
            "movq   %%mm7,    (%0)  \n\t"
            "movq   %%mm5,  16(%0)  \n\t"
            "movq   %%mm3,  32(%0)  \n\t"
            "movq   %%mm1,  48(%0)  \n\t"
            "movq   %%mm0,  64(%0)  \n\t"
            "movq   %%mm2,  80(%0)  \n\t"
            "movq   %%mm4,  96(%0)  \n\t"
            "movq   %%mm6, 112(%0)  \n\t"
            :: "r"(b2 + 4 * i)
            : "memory"
        );
    }

    ff_add_pixels_clamped(b2, dst, stride);
}
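
/* Shape of the computation above as a scalar sketch (hypothetical, reusing
 * the illustrative cavs_idct8_1d_ref from earlier and ignoring the 16-bit
 * wraparound of the MMX registers): pass 1 transforms columns with bias 4
 * and a >>3 scale, storing the result transposed; pass 2 repeats with
 * bias 64 and >>7. One transpose remains, which the decoder absorbs by
 * feeding the coefficients in transposed order (see idct_perm below). */
static av_unused void cavs_idct8_add_ref(uint8_t *dst, int16_t *block,
                                         ptrdiff_t stride)
{
    int16_t b2[64], col[8], out[8];
    int i, j;

    for (i = 0; i < 8; i++) {            /* pass 1: columns, bias 4, >>3 */
        for (j = 0; j < 8; j++)
            col[j] = block[8 * j + i];
        cavs_idct8_1d_ref(out, col, 4);
        for (j = 0; j < 8; j++)
            b2[8 * i + j] = out[j] >> 3; /* transposed store */
    }
    for (i = 0; i < 8; i++) {            /* pass 2: columns, bias 64, >>7 */
        for (j = 0; j < 8; j++)
            col[j] = b2[8 * j + i];
        cavs_idct8_1d_ref(out, col, 64);
        for (j = 0; j < 8; j++)
            b2[8 * j + i] = out[j] >> 7;
    }
    ff_add_pixels_clamped(b2, dst, stride);
}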

#endif /* HAVE_MMX_INLINE */

#if (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE)

/*****************************************************************************
 *
 * motion compensation
 *
 ****************************************************************************/

/* vertical filter [-1 -2 96 42 -7  0] */
#define QPEL_CAVSV1(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
    "movd (%0), "#F"             \n\t"\
    "movq "#C", %%mm6            \n\t"\
    "pmullw "MANGLE(MUL1)", %%mm6\n\t"\
    "movq "#D", %%mm7            \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm7\n\t"\
    "psllw $3, "#E"              \n\t"\
    "psubw "#E", %%mm6           \n\t"\
    "psraw $3, "#E"              \n\t"\
    "paddw %%mm7, %%mm6          \n\t"\
    "paddw "#E", %%mm6           \n\t"\
    "paddw "#B", "#B"            \n\t"\
    "pxor %%mm7, %%mm7           \n\t"\
    "add %2, %0                  \n\t"\
    "punpcklbw %%mm7, "#F"       \n\t"\
    "psubw "#B", %%mm6           \n\t"\
    "psraw $1, "#B"              \n\t"\
    "psubw "#A", %%mm6           \n\t"\
    "paddw "MANGLE(ADD)", %%mm6  \n\t"\
    "psraw $7, %%mm6             \n\t"\
    "packuswb %%mm6, %%mm6       \n\t"\
    OP(%%mm6, (%1), A, d)             \
    "add %3, %1                  \n\t"

/* vertical filter [ 0 -1  5  5 -1  0] */
#define QPEL_CAVSV2(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
    "movd (%0), "#F"             \n\t"\
    "movq "#C", %%mm6            \n\t"\
    "paddw "#D", %%mm6           \n\t"\
    "pmullw "MANGLE(MUL1)", %%mm6\n\t"\
    "add %2, %0                  \n\t"\
    "punpcklbw %%mm7, "#F"       \n\t"\
    "psubw "#B", %%mm6           \n\t"\
    "psubw "#E", %%mm6           \n\t"\
    "paddw "MANGLE(ADD)", %%mm6  \n\t"\
    "psraw $3, %%mm6             \n\t"\
    "packuswb %%mm6, %%mm6       \n\t"\
    OP(%%mm6, (%1), A, d)             \
    "add %3, %1                  \n\t"

/* vertical filter [ 0 -7 42 96 -2 -1] */
#define QPEL_CAVSV3(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
    "movd (%0), "#F"             \n\t"\
    "movq "#C", %%mm6            \n\t"\
    "pmullw "MANGLE(MUL2)", %%mm6\n\t"\
    "movq "#D", %%mm7            \n\t"\
    "pmullw "MANGLE(MUL1)", %%mm7\n\t"\
    "psllw $3, "#B"              \n\t"\
    "psubw "#B", %%mm6           \n\t"\
    "psraw $3, "#B"              \n\t"\
    "paddw %%mm7, %%mm6          \n\t"\
    "paddw "#B", %%mm6           \n\t"\
    "paddw "#E", "#E"            \n\t"\
    "pxor %%mm7, %%mm7           \n\t"\
    "add %2, %0                  \n\t"\
    "punpcklbw %%mm7, "#F"       \n\t"\
    "psubw "#E", %%mm6           \n\t"\
    "psraw $1, "#E"              \n\t"\
    "psubw "#F", %%mm6           \n\t"\
    "paddw "MANGLE(ADD)", %%mm6  \n\t"\
    "psraw $7, %%mm6             \n\t"\
    "packuswb %%mm6, %%mm6       \n\t"\
    OP(%%mm6, (%1), A, d)             \
    "add %3, %1                  \n\t"

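/* The three vertical phases above, as scalar arithmetic (illustrative
 * sketch with a hypothetical helper; a..f are six vertically adjacent
 * source pixels and the clip matches the unsigned saturation of
 * packuswb): */
static av_unused uint8_t cavs_vfilter_ref(const uint8_t *src,
                                          ptrdiff_t stride, int phase)
{
    const uint8_t *p = src - 2 * stride; /* six-tap window, two rows above */
    const int a = p[0],          b = p[stride],     c = p[2 * stride],
              d = p[3 * stride], e = p[4 * stride], f = p[5 * stride];
    int v;

    if (phase == 1)      /* [-1 -2 96 42 -7  0], rounding 64, >>7 */
        v = (-a - 2 * b + 96 * c + 42 * d - 7 * e + 64) >> 7;
    else if (phase == 2) /* [ 0 -1  5  5 -1  0], rounding 4,  >>3 */
        v = (-b + 5 * c + 5 * d - e + 4) >> 3;
    else                 /* [ 0 -7 42 96 -2 -1], rounding 64, >>7 */
        v = (-7 * b + 42 * c + 96 * d - 2 * e - f + 64) >> 7;

    return av_clip_uint8(v);
}
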
#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
    int w = 2;\
    src -= 2*srcStride;\
    \
    while(w--){\
        __asm__ volatile(\
            "pxor %%mm7, %%mm7          \n\t"\
            "movd (%0), %%mm0           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm1           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm2           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm3           \n\t"\
            "add %2, %0                 \n\t"\
            "movd (%0), %%mm4           \n\t"\
            "add %2, %0                 \n\t"\
            "punpcklbw %%mm7, %%mm0     \n\t"\
            "punpcklbw %%mm7, %%mm1     \n\t"\
            "punpcklbw %%mm7, %%mm2     \n\t"\
            "punpcklbw %%mm7, %%mm3     \n\t"\
            "punpcklbw %%mm7, %%mm4     \n\t"\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
            VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
            VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
            VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, ADD, MUL1, MUL2)\
            VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, ADD, MUL1, MUL2)\
            VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
            VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride)\
              NAMED_CONSTRAINTS_ADD(ADD,MUL1,MUL2)\
            : "memory"\
        );\
        if(h==16){\
            __asm__ volatile(\
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
                VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, ADD, MUL1, MUL2)\
                VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, ADD, MUL1, MUL2)\
                VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
                VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
                VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
                VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
                \
                : "+a"(src), "+c"(dst)\
                : "S"((x86_reg)srcStride), "r"((x86_reg)dstStride)\
                  NAMED_CONSTRAINTS_ADD(ADD,MUL1,MUL2)\
                : "memory"\
            );\
        }\
        src += 4-(h+5)*srcStride;\
        dst += 4-h*dstStride;\
    }

#define QPEL_CAVS(OPNAME, OP, MMX)\
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    int h = 8;\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq "MANGLE(ff_pw_5)", %%mm6\n\t"\
        "1:                         \n\t"\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movq  -1(%0), %%mm2        \n\t"\
        "movq   2(%0), %%mm4        \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "movq "MANGLE(ff_pw_4)", %%mm5\n\t"\
        "paddw %%mm5, %%mm0         \n\t"\
        "paddw %%mm5, %%mm1         \n\t"\
        "psraw $3, %%mm0            \n\t"\
        "psraw $3, %%mm1            \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        OP(%%mm0, (%1), %%mm5, q)        \
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
          NAMED_CONSTRAINTS_ADD(ff_pw_4,ff_pw_5)\
        : "memory"\
    );\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)\
{\
    QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)\
{\
    QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_42)\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)\
{\
    QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42)\
}\
\
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride)\
{\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}

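/* Scalar view of the horizontal half-pel filter in *_cavs_qpel8_h_* above
 * (illustrative sketch; the clip matches packuswb saturation):
 * dst[x] = clip((5*(s[x] + s[x+1]) - (s[x-1] + s[x+2]) + 4) >> 3). */
static av_unused uint8_t cavs_hfilter_ref(const uint8_t *s)
{
    return av_clip_uint8((5 * (s[0] + s[1]) - (s[-1] + s[2]) + 4) >> 3);
}
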
#define CAVS_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
}

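/* The _mcXY_ suffix encodes the quarter-pel offset (x, y); the qpel tables
 * are indexed as x + 4*y, so mc20 lands in slot 2 and mc01/mc02/mc03 in
 * slots 4, 8 and 12 (see DSPFUNC below). A hypothetical dispatch sketch: */
static av_unused void cavs_mc_dispatch_example(const CAVSDSPContext *c,
                                               int x, int y, uint8_t *dst,
                                               const uint8_t *src,
                                               ptrdiff_t stride)
{
    /* only slots 0, 2, 4, 8 and 12 are filled by this file */
    c->put_cavs_qpel_pixels_tab[0][x + 4 * y](dst, src, stride);
}
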
#define PUT_OP(a, b, temp, size) \
    "mov" #size " " #a ", " #b "    \n\t"

#define AVG_3DNOW_OP(a, b, temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgusb " #temp ", " #a "      \n\t"\
    "mov" #size " " #a ", " #b "    \n\t"

#define AVG_MMXEXT_OP(a, b, temp, size) \
    "mov" #size " " #b ", " #temp " \n\t"\
    "pavgb " #temp ", " #a "        \n\t"\
    "mov" #size " " #a ", " #b "    \n\t"

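/* Both pavgb (MMXEXT) and pavgusb (3DNow!) compute a rounding byte
 * average, so the AVG_* ops blend the new prediction with what is already
 * in dst. Scalar equivalent (illustrative): */
static av_unused uint8_t avg_rnd_ref(uint8_t a, uint8_t b)
{
    return (a + b + 1) >> 1; /* matches pavgb / pavgusb per byte */
}
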
#endif /* (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE) */

#if HAVE_MMX_EXTERNAL
static void put_cavs_qpel8_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_put_pixels8_mmx(dst, src, stride, 8);
}

static void avg_cavs_qpel8_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                    ptrdiff_t stride)
{
    ff_avg_pixels8_mmx(dst, src, stride, 8);
}

static void avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, const uint8_t *src,
                                       ptrdiff_t stride)
{
    ff_avg_pixels8_mmxext(dst, src, stride, 8);
}

static void put_cavs_qpel16_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_put_pixels16_mmx(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_mmx(uint8_t *dst, const uint8_t *src,
                                     ptrdiff_t stride)
{
    ff_avg_pixels16_mmx(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, const uint8_t *src,
                                        ptrdiff_t stride)
{
    ff_avg_pixels16_mmxext(dst, src, stride, 16);
}

static void put_cavs_qpel16_mc00_sse2(uint8_t *dst, const uint8_t *src,
                                      ptrdiff_t stride)
{
    ff_put_pixels16_sse2(dst, src, stride, 16);
}

static void avg_cavs_qpel16_mc00_sse2(uint8_t *dst, const uint8_t *src,
                                      ptrdiff_t stride)
{
    ff_avg_pixels16_sse2(dst, src, stride, 16);
}
#endif

static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c,
                                     AVCodecContext *avctx)
{
#if HAVE_MMX_EXTERNAL
    c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_mmx;
    c->put_cavs_qpel_pixels_tab[1][0] = put_cavs_qpel8_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmx;
    c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmx;
#endif

#if HAVE_MMX_INLINE
    c->cavs_idct8_add = cavs_idct8_add_mmx;
    c->idct_perm      = FF_IDCT_PERM_TRANSPOSE;
#endif /* HAVE_MMX_INLINE */
}

#define DSPFUNC(PFX, IDX, NUM, EXT) \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
    c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT;

#if HAVE_MMXEXT_INLINE
QPEL_CAVS(put_, PUT_OP, mmxext)
QPEL_CAVS(avg_, AVG_MMXEXT_OP, mmxext)

CAVS_MC(put_,  8, mmxext)
CAVS_MC(put_, 16, mmxext)
CAVS_MC(avg_,  8, mmxext)
CAVS_MC(avg_, 16, mmxext)
#endif /* HAVE_MMXEXT_INLINE */

#if HAVE_AMD3DNOW_INLINE
QPEL_CAVS(put_, PUT_OP, 3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)

CAVS_MC(put_,  8, 3dnow)
CAVS_MC(put_, 16, 3dnow)
CAVS_MC(avg_,  8, 3dnow)
CAVS_MC(avg_, 16, 3dnow)

static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
                                       AVCodecContext *avctx)
{
    DSPFUNC(put, 0, 16, 3dnow);
    DSPFUNC(put, 1,  8, 3dnow);
    DSPFUNC(avg, 0, 16, 3dnow);
    DSPFUNC(avg, 1,  8, 3dnow);
}
#endif /* HAVE_AMD3DNOW_INLINE */

av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
{
    av_unused int cpu_flags = av_get_cpu_flags();

    if (X86_MMX(cpu_flags))
        cavsdsp_init_mmx(c, avctx);

#if HAVE_AMD3DNOW_INLINE
    if (INLINE_AMD3DNOW(cpu_flags))
        cavsdsp_init_3dnow(c, avctx);
#endif /* HAVE_AMD3DNOW_INLINE */
#if HAVE_MMXEXT_INLINE
    if (INLINE_MMXEXT(cpu_flags)) {
        DSPFUNC(put, 0, 16, mmxext);
        DSPFUNC(put, 1,  8, mmxext);
        DSPFUNC(avg, 0, 16, mmxext);
        DSPFUNC(avg, 1,  8, mmxext);
    }
#endif
#if HAVE_MMX_EXTERNAL
    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmxext;
        c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmxext;
    }
#endif
#if HAVE_SSE2_EXTERNAL
    if (EXTERNAL_SSE2(cpu_flags)) {
        c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_sse2;
        c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_sse2;
    }
#endif
}
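
/* Usage sketch (hedged; mirrors how the generic init is expected to reach
 * this file): the C init fills the context with portable defaults, then
 * the x86 init above overrides the entries the running CPU supports. */
static av_unused void cavsdsp_init_example(AVCodecContext *avctx)
{
    CAVSDSPContext dsp;
    ff_cavsdsp_init(&dsp, avctx); /* calls ff_cavsdsp_init_x86() on x86 */
    /* decoding then dispatches through the pointers, e.g.:
     *   dsp.cavs_idct8_add(dst, block, stride);
     *   dsp.put_cavs_qpel_pixels_tab[0][0](dst, src, stride); */
}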