/* COMMON_CORE_INT16_MMX2: one int16 output sample. Multiply-accumulate the
 * source window against one filter phase with pmaddwd, starting from the
 * rounding constant, then >>15 and store with int16 saturation. len is a
 * negative byte offset that counts up to zero. */
#define COMMON_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movq "MANGLE(ff_resample_int16_rounder)", %%mm0 \n\t" /* acc = rounding constant */\
    "1:                         \n\t"\
    "movq    (%1, %0), %%mm1    \n\t"\
    "pmaddwd (%2, %0), %%mm1    \n\t"\
    "paddd      %%mm1, %%mm0    \n\t"\
    "add           $8, %0       \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t" /* fold the high dword pair down */\
    "paddd      %%mm1, %%mm0    \n\t"\
    "psrad        $15, %%mm0    \n\t"\
    "packssdw   %%mm0, %%mm0    \n\t"\
    "movd       %%mm0, (%3)     \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
);
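
/* For orientation, a scalar sketch of what the COMMON_CORE_INT16_* macros
 * compute (illustrative only, not part of the original code; it idealizes
 * the accumulation as 64-bit, while the SIMD code keeps 32-bit partial
 * sums, and it assumes the rounder constant is 1 << 14):
 *
 *     int64_t acc = 1 << 14;
 *     for (int i = 0; i < c->filter_length; i++)
 *         acc += src[sample_index + i] * (int64_t)filter[i];
 *     dst[dst_index] = av_clip_int16((int)(acc >> 15));
 */

/* LINEAR_CORE_INT16_MMX2: the same multiply-accumulate, run against two
 * adjacent filter phases at once (filter and filter + c->filter_alloc);
 * the raw dword sums are returned in val and v2 so the caller can
 * interpolate between the phases. */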
#define LINEAR_CORE_INT16_MMX2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor          %%mm0, %%mm0 \n\t"\
    "pxor          %%mm2, %%mm2 \n\t"\
    "1:                         \n\t"\
    "movq       (%3, %0), %%mm1 \n\t"\
    "movq          %%mm1, %%mm3 \n\t"\
    "pmaddwd    (%4, %0), %%mm1 \n\t" /* current filter phase */\
    "pmaddwd    (%5, %0), %%mm3 \n\t" /* next filter phase */\
    "paddd         %%mm1, %%mm0 \n\t"\
    "paddd         %%mm3, %%mm2 \n\t"\
    "add              $8, %0    \n\t"\
    " js 1b                     \n\t"\
    "pshufw $0x0E, %%mm0, %%mm1 \n\t"\
    "pshufw $0x0E, %%mm2, %%mm3 \n\t"\
    "paddd         %%mm1, %%mm0 \n\t"\
    "paddd         %%mm3, %%mm2 \n\t"\
    "movd          %%mm0, %1    \n\t"\
    "movd          %%mm2, %2    \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
);
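
/* COMMON_CORE_INT16_SSE2: SSE2 variant of the common core, eight int16
 * samples per iteration; two pshufd/paddd rounds fold the four dword
 * partial sums into the low lane before the >>15 and saturating pack. */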
#define COMMON_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "movdqa "MANGLE(ff_resample_int16_rounder)", %%xmm0 \n\t"\
    "1:                           \n\t"\
    "movdqu  (%1, %0), %%xmm1     \n\t"\
    "pmaddwd (%2, %0), %%xmm1     \n\t"\
    "paddd     %%xmm1, %%xmm0     \n\t"\
    "add          $16, %0         \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t" /* reduce 4 dword sums to 1 */\
    "paddd     %%xmm1, %%xmm0     \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "paddd     %%xmm1, %%xmm0     \n\t"\
    "psrad        $15, %%xmm0     \n\t"\
    "packssdw  %%xmm0, %%xmm0     \n\t"\
    "movd      %%xmm0, (%3)       \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
      NAMED_CONSTRAINTS_ARRAY_ADD(ff_resample_int16_rounder)\
      XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
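
/* LINEAR_CORE_INT16_SSE2: SSE2 variant of the two-phase core. */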
#define LINEAR_CORE_INT16_SSE2 \
    x86_reg len= -2*c->filter_length;\
__asm__ volatile(\
    "pxor       %%xmm0, %%xmm0    \n\t"\
    "pxor       %%xmm2, %%xmm2    \n\t"\
    "1:                           \n\t"\
    "movdqu  (%3, %0), %%xmm1     \n\t"\
    "movdqa     %%xmm1, %%xmm3    \n\t"\
    "pmaddwd (%4, %0), %%xmm1     \n\t" /* current filter phase */\
    "pmaddwd (%5, %0), %%xmm3     \n\t" /* next filter phase */\
    "paddd      %%xmm1, %%xmm0    \n\t"\
    "paddd      %%xmm3, %%xmm2    \n\t"\
    "add           $16, %0        \n\t"\
    " js 1b                       \n\t"\
    "pshufd $0x0E, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x0E, %%xmm2, %%xmm3 \n\t"\
    "paddd      %%xmm1, %%xmm0    \n\t"\
    "paddd      %%xmm3, %%xmm2    \n\t"\
    "pshufd $0x01, %%xmm0, %%xmm1 \n\t"\
    "pshufd $0x01, %%xmm2, %%xmm3 \n\t"\
    "paddd      %%xmm1, %%xmm0    \n\t"\
    "paddd      %%xmm3, %%xmm2    \n\t"\
    "movd       %%xmm0, %1        \n\t"\
    "movd       %%xmm2, %2        \n\t"\
    : "+r" (len),\
      "=r" (val),\
      "=r" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
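
/* COMMON_CORE_DBL_SSE2: double-precision common core; len again counts
 * bytes (-8 per coefficient), accumulation is in packed doubles, and
 * movhlps/addpd do the final horizontal add before the scalar store. */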
#define COMMON_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd   %%xmm0, %%xmm0     \n\t"\
    "1:                         \n\t"\
    "movupd (%1, %0), %%xmm1    \n\t"\
    "mulpd  (%2, %0), %%xmm1    \n\t"\
    "addpd   %%xmm1, %%xmm0     \n\t"\
    "add        $16, %0         \n\t"\
    " js 1b                     \n\t"\
    "movhlps %%xmm0, %%xmm1     \n\t" /* horizontal add of the two doubles */\
    "addpd   %%xmm1, %%xmm0     \n\t"\
    "movsd   %%xmm0, (%3)       \n\t"\
    : "+r" (len)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (dst+dst_index)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
);
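
/* LINEAR_CORE_DBL_SSE2: double-precision two-phase core; the two reduced
 * sums are written out to val and v2. */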
#define LINEAR_CORE_DBL_SSE2 \
    x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
    "xorpd   %%xmm0, %%xmm0     \n\t"\
    "xorpd   %%xmm2, %%xmm2     \n\t"\
    "1:                         \n\t"\
    "movupd (%3, %0), %%xmm1    \n\t"\
    "movapd  %%xmm1, %%xmm3     \n\t"\
    "mulpd  (%4, %0), %%xmm1    \n\t" /* current filter phase */\
    "mulpd  (%5, %0), %%xmm3    \n\t" /* next filter phase */\
    "addpd   %%xmm1, %%xmm0     \n\t"\
    "addpd   %%xmm3, %%xmm2     \n\t"\
    "add        $16, %0         \n\t"\
    " js 1b                     \n\t"\
    "movhlps %%xmm0, %%xmm1     \n\t"\
    "movhlps %%xmm2, %%xmm3     \n\t"\
    "addpd   %%xmm1, %%xmm0     \n\t"\
    "addpd   %%xmm3, %%xmm2     \n\t"\
    "movsd   %%xmm0, %1         \n\t"\
    "movsd   %%xmm2, %2         \n\t"\
    : "+r" (len),\
      "=m" (val),\
      "=m" (v2)\
    : "r" (((uint8_t*)(src+sample_index))-len),\
      "r" (((uint8_t*)filter)-len),\
      "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
    XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);