        0x0200020002000200LL,};

        0x0004000400040004LL,};
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YOffset)  = 0x1010101010101010ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UVOffset) = 0x8080808080808080ULL;
#define COMPILE_TEMPLATE_MMXEXT 0
#define RENAME(a) a ## _mmx
#if HAVE_MMXEXT_INLINE
#undef COMPILE_TEMPLATE_MMXEXT
#define COMPILE_TEMPLATE_MMXEXT 1
#define RENAME(a) a ## _mmxext
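/* The shared swscale template (its #include is not part of this excerpt) is
 * compiled once per SIMD level: COMPILE_TEMPLATE_MMXEXT selects the
 * MMXEXT-only code paths and RENAME() suffixes every generated function, so
 * the same source yields both the *_mmx and the *_mmxext variants. */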
    const int firstLumSrcY= vLumFilterPos[dstY];
    const int firstChrSrcY= vChrFilterPos[chrDstY];
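    /* firstLumSrcY / firstChrSrcY are the first source rows the vertical luma
     * and chroma filters need in order to produce output row dstY (chrDstY for
     * the subsampled chroma planes). */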
    if (dstY < dstH - 2) {
        const int16_t **lumSrcPtr  = (const int16_t **)(void*) lumPixBuf  + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
        const int16_t **chrUSrcPtr = (const int16_t **)(void*) chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
        const int16_t **alpSrcPtr  = (CONFIG_SWSCALE_ALPHA && alpPixBuf) ?
            (const int16_t **)(void*) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
        if (firstLumSrcY < 0 || firstLumSrcY + vLumFilterSize > c->srcH) {
            const int16_t **tmpY = (const int16_t **) lumPixBuf + 2 * vLumBufSize;
            int neg = -firstLumSrcY, i, end = FFMIN(c->srcH - firstLumSrcY, vLumFilterSize);
            for (i = 0; i < neg; i++)
                tmpY[i] = lumSrcPtr[neg];
            for ( ; i < end; i++)
                tmpY[i] = lumSrcPtr[i];
                const int16_t **tmpA = (const int16_t **) alpPixBuf + 2 * vLumBufSize;
                for (i = 0; i < neg; i++)
                    tmpA[i] = alpSrcPtr[neg];
                for ( ; i < end; i++)
                    tmpA[i] = alpSrcPtr[i];
                for ( ; i < vLumFilterSize; i++)
                    tmpA[i] = tmpA[i - 1];
        if (firstChrSrcY < 0 || firstChrSrcY + vChrFilterSize > c->chrSrcH) {
            const int16_t **tmpU = (const int16_t **) chrUPixBuf + 2 * vChrBufSize;
            int neg = -firstChrSrcY, i, end = FFMIN(c->chrSrcH - firstChrSrcY, vChrFilterSize);
            for (i = 0; i < neg; i++) {
                tmpU[i] = chrUSrcPtr[neg];
            }
            for ( ; i < end; i++) {
                tmpU[i] = chrUSrcPtr[i];
            }
            for ( ; i < vChrFilterSize; i++) {
                tmpU[i] = tmpU[i - 1];
            }
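        /* The loops above (and their luma/alpha counterparts) build a
         * temporary pointer list when the vertical filter window reaches past
         * the top or bottom of the source: out-of-range taps are redirected to
         * the first or last valid row, so the SIMD code never reads outside
         * the line buffers. */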
                *(const void**)&lumMmxFilter[s*i             ]= lumSrcPtr[i];
                *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
                lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i]
                    + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
                if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
                    *(const void**)&alpMmxFilter[s*i             ]= alpSrcPtr[i];
                    *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)];
                *(const void**)&chrMmxFilter[s*i             ]= chrUSrcPtr[i];
                *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrUSrcPtr[i+(vChrFilterSize>1)];
                chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i]
                    + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
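            /* Accurate-rounding layout: for each pair of taps the table holds
             * the two source-line pointers (APCK_PTR2 apart) plus the two
             * 16-bit coefficients packed into one 32-bit word (tap i in the
             * low half, tap i+1 shifted into the high half), which lets the
             * template's accurate-rounding loop apply two taps at once. */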
                *(const void**)&lumMmxFilter[4*i+0]= lumSrcPtr[i];
                lumMmxFilter[4*i+2]=
                lumMmxFilter[4*i+3]=
                    ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001U;
                if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
                    *(const void**)&alpMmxFilter[4*i+0]= alpSrcPtr[i];
                    alpMmxFilter[4*i+2]=
                    alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
                *(const void**)&chrMmxFilter[4*i+0]= chrUSrcPtr[i];
                chrMmxFilter[4*i+2]=
                chrMmxFilter[4*i+3]=
                    ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001U;
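            /* Plain (non-accurate) layout: one entry per tap, with the 16-bit
             * coefficient duplicated into both halves of a 32-bit word via
             * *0x10001U so a single packed multiply applies it to every
             * sample. */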
static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
                          const int16_t **src, uint8_t *dest, int dstW,
                          const uint8_t *dither, int offset)
{
    if (((uintptr_t)dest) & 15) {
        yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
        __asm__ volatile("movq       (%0), %%xmm3\n\t"
                         "movdqa    %%xmm3, %%xmm4\n\t"
                         "psrlq        $24, %%xmm3\n\t"
                         "psllq        $40, %%xmm4\n\t"
                         "por       %%xmm4, %%xmm3\n\t"
        __asm__ volatile("movq       (%0), %%xmm3\n\t"
223 "pxor %%xmm0, %%xmm0\n\t"
224 "punpcklbw %%xmm0, %%xmm3\n\t"
225 "movd %0, %%xmm1\n\t"
226 "punpcklwd %%xmm1, %%xmm1\n\t"
227 "punpckldq %%xmm1, %%xmm1\n\t"
228 "punpcklqdq %%xmm1, %%xmm1\n\t"
229 "psllw $3, %%xmm1\n\t"
230 "paddw %%xmm1, %%xmm3\n\t"
231 "psraw $4, %%xmm3\n\t"
235 "movdqa %%xmm3, %%xmm4\n\t"
236 "movdqa %%xmm3, %%xmm7\n\t"
238 "mov %0, %%"REG_d
" \n\t"\
239 "mov (%%"REG_d
"), %%"REG_S
" \n\t"\
242 "movddup 8(%%"REG_d
"), %%xmm0 \n\t" \
243 "movdqa (%%"REG_S
", %%"REG_c
", 2), %%xmm2 \n\t" \
244 "movdqa 16(%%"REG_S
", %%"REG_c
", 2), %%xmm5 \n\t" \
245 "add $16, %%"REG_d
" \n\t"\
246 "mov (%%"REG_d
"), %%"REG_S
" \n\t"\
247 "test %%"REG_S
", %%"REG_S
" \n\t"\
248 "pmulhw %%xmm0, %%xmm2 \n\t"\
249 "pmulhw %%xmm0, %%xmm5 \n\t"\
250 "paddw %%xmm2, %%xmm3 \n\t"\
251 "paddw %%xmm5, %%xmm4 \n\t"\
253 "psraw $3, %%xmm3 \n\t"\
254 "psraw $3, %%xmm4 \n\t"\
255 "packuswb %%xmm4, %%xmm3 \n\t"
256 "movntdq %%xmm3, (%1, %%"REG_c
")\n\t"
257 "add $16, %%"REG_c
" \n\t"\
258 "cmp %2, %%"REG_c
" \n\t"\
259 "movdqa %%xmm7, %%xmm3\n\t"
260 "movdqa %%xmm7, %%xmm4\n\t"
261 "mov %0, %%"REG_d
" \n\t"\
262 "mov (%%"REG_d
"), %%"REG_S
" \n\t"\
265 "r" (dest-offset),
"g" ((
x86_reg)(dstW+offset)),
"m" (offset)
266 :
XMM_CLOBBERS(
"%xmm0" ,
"%xmm1" ,
"%xmm2" ,
"%xmm3" ,
"%xmm4" ,
"%xmm5" ,
"%xmm7" ,)
267 "%"REG_d,
"%"REG_S,
"%"REG_c
#define SCALE_FUNC(filter_n, from_bpc, to_bpc, opt) \
void ff_hscale ## from_bpc ## to ## to_bpc ## _ ## filter_n ## _ ## opt( \
                                                SwsContext *c, int16_t *data, \
                                                int dstW, const uint8_t *src, \
                                                const int16_t *filter, \
                                                const int32_t *filterPos, int filterSize)
#define SCALE_FUNCS(filter_n, opt) \
    SCALE_FUNC(filter_n,  8, 15, opt); \
    SCALE_FUNC(filter_n,  9, 15, opt); \
    SCALE_FUNC(filter_n, 10, 15, opt); \
    SCALE_FUNC(filter_n, 12, 15, opt); \
    SCALE_FUNC(filter_n, 14, 15, opt); \
    SCALE_FUNC(filter_n, 16, 15, opt); \
    SCALE_FUNC(filter_n,  8, 19, opt); \
    SCALE_FUNC(filter_n,  9, 19, opt); \
    SCALE_FUNC(filter_n, 10, 19, opt); \
    SCALE_FUNC(filter_n, 12, 19, opt); \
    SCALE_FUNC(filter_n, 14, 19, opt); \
    SCALE_FUNC(filter_n, 16, 19, opt)
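/* Naming convention: SCALE_FUNC(4, 8, 15, sse2), for example, declares
 * ff_hscale8to15_4_sse2 -- a horizontal scaler reading 8-bit input, writing
 * the 15-bit intermediate format, specialized for 4-tap filters, in its SSE2
 * flavour. The 19-bit variants feed the higher-precision output paths. */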
#define SCALE_FUNCS_MMX(opt) \
    SCALE_FUNCS(4, opt); \
    SCALE_FUNCS(8, opt); \
#define SCALE_FUNCS_SSE(opt) \
    SCALE_FUNCS(4, opt); \
    SCALE_FUNCS(8, opt); \
    SCALE_FUNCS(X4, opt); \
#define VSCALEX_FUNC(size, opt) \
void ff_yuv2planeX_ ## size ## _ ## opt(const int16_t *filter, int filterSize, \
                                        const int16_t **src, uint8_t *dest, int dstW, \
                                        const uint8_t *dither, int offset)
#define VSCALEX_FUNCS(opt) \
    VSCALEX_FUNC(8,  opt); \
    VSCALEX_FUNC(9,  opt); \
    VSCALEX_FUNC(10, opt)
#define VSCALE_FUNC(size, opt) \
void ff_yuv2plane1_ ## size ## _ ## opt(const int16_t *src, uint8_t *dst, int dstW, \
                                        const uint8_t *dither, int offset)
#define VSCALE_FUNCS(opt1, opt2) \
    VSCALE_FUNC(8,  opt1); \
    VSCALE_FUNC(9,  opt2); \
    VSCALE_FUNC(10, opt2); \
    VSCALE_FUNC(16, opt1)
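/* ff_yuv2planeX_* applies a full vertical filter across several source lines,
 * while ff_yuv2plane1_* converts a single already-filtered line to the
 * destination bit depth (the size suffix) with dithering; opt1/opt2 let the
 * 8/16-bit and the 9/10-bit variants come from different SIMD levels. */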
#define INPUT_Y_FUNC(fmt, opt) \
void ff_ ## fmt ## ToY_ ## opt(uint8_t *dst, const uint8_t *src, \
                               const uint8_t *unused1, const uint8_t *unused2, \
                               int w, uint32_t *unused)
#define INPUT_UV_FUNC(fmt, opt) \
void ff_ ## fmt ## ToUV_ ## opt(uint8_t *dstU, uint8_t *dstV, \
                                const uint8_t *unused0, \
                                const uint8_t *src1, \
                                const uint8_t *src2, \
                                int w, uint32_t *unused)
#define INPUT_FUNC(fmt, opt) \
    INPUT_Y_FUNC(fmt, opt); \
    INPUT_UV_FUNC(fmt, opt)
#define INPUT_FUNCS(opt) \
    INPUT_FUNC(uyvy, opt); \
    INPUT_FUNC(yuyv, opt); \
    INPUT_UV_FUNC(nv12, opt); \
    INPUT_UV_FUNC(nv21, opt); \
    INPUT_FUNC(rgba, opt); \
    INPUT_FUNC(bgra, opt); \
    INPUT_FUNC(argb, opt); \
    INPUT_FUNC(abgr, opt); \
    INPUT_FUNC(rgb24, opt); \
    INPUT_FUNC(bgr24, opt)
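/* These declare the packed-to-planar input converters implemented in the
 * assembly files: ff_<fmt>ToY_<opt> extracts luma and ff_<fmt>ToUV_<opt>
 * extracts chroma into the planar intermediate the scaler works on; NV12/NV21
 * only need the UV half because their luma plane is already planar. */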
        sws_init_swscale_mmx(c);
#if HAVE_MMXEXT_INLINE
        sws_init_swscale_mmxext(c);
#define ASSIGN_SCALE_FUNC2(hscalefn, filtersize, opt1, opt2) do { \
    if (c->srcBpc == 8) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale8to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale8to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 9) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale9to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale9to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 10) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale10to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale10to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 12) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale12to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale12to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 14 || ((c->srcFormat==AV_PIX_FMT_PAL8||isAnyRGB(c->srcFormat)) && av_pix_fmt_desc_get(c->srcFormat)->comp[0].depth_minus1<15)) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale14to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale14to19_ ## filtersize ## _ ## opt1; \
    } else { \
        av_assert0(c->srcBpc == 16);\
        hscalefn = c->dstBpc <= 14 ? ff_hscale16to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale16to19_ ## filtersize ## _ ## opt1; \
#define ASSIGN_MMX_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \
    switch (filtersize) { \
    case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \
    case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \
    default: ASSIGN_SCALE_FUNC2(hscalefn, X, opt1, opt2); break; \
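/* ASSIGN_SCALE_FUNC2 picks the horizontal scaler by source bit depth and by
 * whether the destination can use the 15-bit or needs the 19-bit intermediate;
 * the dispatch above then specializes on filter length (4, 8, or generic X).
 * For instance, srcBpc == 8 with dstBpc <= 14 and filtersize == 4 resolves to
 * ff_hscale8to15_4_<opt2>. */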
#define ASSIGN_VSCALEX_FUNC(vscalefn, opt, do_16_case, condition_8bit) \
    case 16:                          do_16_case;                          break; \
    case 10: if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_10_ ## opt; break; \
    case 9:  if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_9_  ## opt; break; \
    default: if (condition_8bit) break; \
#define ASSIGN_VSCALE_FUNC(vscalefn, opt1, opt2, opt2chk) \
    case 16: if (!isBE(c->dstFormat))            vscalefn = ff_yuv2plane1_16_ ## opt1; break; \
    case 10: if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_10_ ## opt2; break; \
    case 9:  if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_9_  ## opt2; break; \
    case 8:                                      vscalefn = ff_yuv2plane1_8_  ## opt1; break; \
    default: av_assert0(c->dstBpc>8); \
#define case_rgb(x, X, opt) \
        case AV_PIX_FMT_ ## X: \
            c->lumToYV12 = ff_ ## x ## ToY_ ## opt; \
            if (!c->chrSrcHSubSample) \
                c->chrToYV12 = ff_ ## x ## ToUV_ ## opt; \
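/* case_rgb wires one packed RGB input format to its converters: luma always
 * goes through ff_<x>ToY_<opt>, while ff_<x>ToUV_<opt> is only installed when
 * the chroma is read at full horizontal resolution (no input-side chroma
 * subsampling). */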
#define ASSIGN_SSE_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \
    switch (filtersize) { \
    case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \
    case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \
    default: if (filtersize & 4) ASSIGN_SCALE_FUNC2(hscalefn, X4, opt1, opt2); \
             else                ASSIGN_SCALE_FUNC2(hscalefn, X8, opt1, opt2); \
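/* Unlike the MMX dispatch, the SSE path splits the generic case in two:
 * filter lengths with bit 2 set (4 mod 8) use the X4 kernel, the rest the X8
 * kernel, presumably because the X8 loop consumes 8 coefficients per
 * iteration. */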
                        HAVE_ALIGNED_STACK || ARCH_X86_64);

                        HAVE_ALIGNED_STACK || ARCH_X86_64);

                        HAVE_ALIGNED_STACK || ARCH_X86_64);