DECLARE_ASM_ALIGNED(8, const uint64_t, ff_dither4)[2] = {
    0x0103010301030103LL,
    0x0200020002000200LL,};

DECLARE_ASM_ALIGNED(8, const uint64_t, ff_dither8)[2] = {
    0x0602060206020602LL,
    0x0004000400040004LL,};
#define COMPILE_TEMPLATE_MMXEXT 0
#define RENAME(a) a ## _mmx
/* ... */
#if HAVE_MMXEXT_INLINE
/* ... */
#undef COMPILE_TEMPLATE_MMXEXT
#define COMPILE_TEMPLATE_MMXEXT 1
#define RENAME(a) a ## _mmxext
/* ... */

/* excerpt: void ff_updateMMXDitherTables(SwsContext *c, int dstY) */
    const int firstLumSrcY= vLumFilterPos[dstY];
    const int firstChrSrcY= vChrFilterPos[chrDstY];
    /* ... */
    if (dstY < dstH - 2) {
        const int16_t **lumSrcPtr  = (const int16_t **)(void*) lumPlane->line  + firstLumSrcY - lumPlane->sliceY;
        const int16_t **chrUSrcPtr = (const int16_t **)(void*) chrUPlane->line + firstChrSrcY - chrUPlane->sliceY;
        const int16_t **alpSrcPtr  = (CONFIG_SWSCALE_ALPHA && hasAlpha) ?
            (const int16_t **)(void*) alpPlane->line + firstLumSrcY - alpPlane->sliceY : NULL;

        /* clamp the luma/alpha filter window to the available source lines */
        if (firstLumSrcY < 0 || firstLumSrcY + vLumFilterSize > c->srcH) {
            const int16_t **tmpY = (const int16_t **) lumPlane->tmp;

            int neg = -firstLumSrcY, i, end = FFMIN(c->srcH - firstLumSrcY, vLumFilterSize);
            for (i = 0; i < neg;            i++)
                tmpY[i] = lumSrcPtr[neg];
            for ( ; i < end;                i++)
                tmpY[i] = lumSrcPtr[i];
            /* ... */
                const int16_t **tmpA = (const int16_t **) alpPlane->tmp;
                /* ... */
                    tmpA[i] = alpSrcPtr[neg];
                for ( ; i < end;            i++)
                    tmpA[i] = alpSrcPtr[i];
                /* ... */
                    tmpA[i] = tmpA[i - 1];
        }
        /* same clamping for the chroma filter window */
        if (firstChrSrcY < 0 || firstChrSrcY + vChrFilterSize > c->chrSrcH) {
            const int16_t **tmpU = (const int16_t **) chrUPlane->tmp;
            int neg = -firstChrSrcY, i, end = FFMIN(c->chrSrcH - firstChrSrcY, vChrFilterSize);
            for (i = 0; i < neg;            i++) {
                tmpU[i] = chrUSrcPtr[neg];
            }
            for ( ; i < end;                i++) {
                tmpU[i] = chrUSrcPtr[i];
            }
            /* ... */
                tmpU[i] = tmpU[i - 1];
        }

        /* ... accurate-rounding layout: pairs of line pointers plus packed coefficient pairs */
            *(const void**)&lumMmxFilter[s*i              ]= lumSrcPtr[i  ];
            *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4  ]= lumSrcPtr[i+(vLumFilterSize>1)];
            /* ... */
            lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i    ]
                + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1] * (1 << 16) : 0);
            if (CONFIG_SWSCALE_ALPHA && hasAlpha) {
                *(const void**)&alpMmxFilter[s*i              ]= alpSrcPtr[i  ];
                *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4  ]= alpSrcPtr[i+(vLumFilterSize>1)];
                /* ... */
            }
            /* ... */
            *(const void**)&chrMmxFilter[s*i              ]= chrUSrcPtr[i  ];
            *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4  ]= chrUSrcPtr[i+(vChrFilterSize>1)];
            /* ... */
            chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i    ]
                + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1] * (1 << 16) : 0);

        /* ... default layout: one line pointer plus a 16-bit coefficient replicated into both halves */
            *(const void**)&lumMmxFilter[4*i+0]= lumSrcPtr[i];
            /* ... */
                ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001U;
            if (CONFIG_SWSCALE_ALPHA && hasAlpha) {
                *(const void**)&alpMmxFilter[4*i+0]= alpSrcPtr[i];
                /* ... */
                alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
            }
            /* ... */
            *(const void**)&chrMmxFilter[4*i+0]= chrUSrcPtr[i];
            /* ... */
                ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001U;
static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
                          const int16_t **src, uint8_t *dest, int dstW,
                          const uint8_t *dither, int offset)
{
    if(((uintptr_t)dest) & 15){
        /* ... destination not 16-byte aligned: take the fallback path ... */
    }
    /* ... */
#define MAIN_FUNCTION \
        "pxor       %%xmm0, %%xmm0 \n\t" \
        "punpcklbw  %%xmm0, %%xmm3 \n\t" \
        "movd           %4, %%xmm1 \n\t" \
        "punpcklwd  %%xmm1, %%xmm1 \n\t" \
        "punpckldq  %%xmm1, %%xmm1 \n\t" \
        "punpcklqdq %%xmm1, %%xmm1 \n\t" \
        "psllw          $3, %%xmm1 \n\t" \
        "paddw      %%xmm1, %%xmm3 \n\t" \
        "psraw          $4, %%xmm3 \n\t" \
        "movdqa     %%xmm3, %%xmm4 \n\t" \
        "movdqa     %%xmm3, %%xmm7 \n\t" \
        "movl           %3, %%ecx  \n\t" \
        "mov            %0, %%"FF_REG_d"                          \n\t" \
        "mov            (%%"FF_REG_d"), %%"FF_REG_S"              \n\t" \
        /* ... */ \
        "movddup        8(%%"FF_REG_d"), %%xmm0                   \n\t" \
        "movdqa         (%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm2   \n\t" \
        "movdqa         16(%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm5 \n\t" \
        "add            $16, %%"FF_REG_d"                         \n\t" \
        "mov            (%%"FF_REG_d"), %%"FF_REG_S"              \n\t" \
        "test           %%"FF_REG_S", %%"FF_REG_S"                \n\t" \
        "pmulhw         %%xmm0, %%xmm2                            \n\t" \
        "pmulhw         %%xmm0, %%xmm5                            \n\t" \
        "paddw          %%xmm2, %%xmm3                            \n\t" \
        "paddw          %%xmm5, %%xmm4                            \n\t" \
        /* ... */ \
        "psraw          $3, %%xmm3                                \n\t" \
        "psraw          $3, %%xmm4                                \n\t" \
        "packuswb       %%xmm4, %%xmm3                            \n\t" \
        "movntdq        %%xmm3, (%1, %%"FF_REG_c")                \n\t" \
        "add            $16, %%"FF_REG_c"                         \n\t" \
        "cmp            %2, %%"FF_REG_c"                          \n\t" \
        "movdqa         %%xmm7, %%xmm3                            \n\t" \
        "movdqa         %%xmm7, %%xmm4                            \n\t" \
        "mov            %0, %%"FF_REG_d"                          \n\t" \
        "mov            (%%"FF_REG_d"), %%"FF_REG_S"              \n\t" \
        /* ... */

    if (offset) {
        __asm__ volatile(
            /* offset != 0: pre-rotate the 8-byte dither pattern */
            "movq           %5, %%xmm3  \n\t"
            "movdqa     %%xmm3, %%xmm4  \n\t"
            "psrlq         $24, %%xmm3  \n\t"
            "psllq         $40, %%xmm4  \n\t"
            "por        %%xmm4, %%xmm3  \n\t"
            MAIN_FUNCTION
              :: "g" (filter),
                 "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
                 "m" (filterSize), "m" (((uint64_t *) dither)[0])
              : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                             "%xmm4", "%xmm5", "%xmm7",)
                "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
        );
    } else {
        __asm__ volatile(
            "movq           %5, %%xmm3  \n\t"
            MAIN_FUNCTION
              :: "g" (filter),
                 "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
                 "m" (filterSize), "m" (((uint64_t *) dither)[0])
              : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3",
                             "%xmm4", "%xmm5", "%xmm7",)
                "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
        );
    }
}
#define SCALE_FUNC(filter_n, from_bpc, to_bpc, opt) \
void ff_hscale ## from_bpc ## to ## to_bpc ## _ ## filter_n ## _ ## opt( \
                                                SwsContext *c, int16_t *data, \
                                                int dstW, const uint8_t *src, \
                                                const int16_t *filter, \
                                                const int32_t *filterPos, int filterSize)

#define SCALE_FUNCS(filter_n, opt) \
    SCALE_FUNC(filter_n,  8, 15, opt); \
    SCALE_FUNC(filter_n,  9, 15, opt); \
    SCALE_FUNC(filter_n, 10, 15, opt); \
    SCALE_FUNC(filter_n, 12, 15, opt); \
    SCALE_FUNC(filter_n, 14, 15, opt); \
    SCALE_FUNC(filter_n, 16, 15, opt); \
    SCALE_FUNC(filter_n,  8, 19, opt); \
    SCALE_FUNC(filter_n,  9, 19, opt); \
    SCALE_FUNC(filter_n, 10, 19, opt); \
    SCALE_FUNC(filter_n, 12, 19, opt); \
    SCALE_FUNC(filter_n, 14, 19, opt); \
    SCALE_FUNC(filter_n, 16, 19, opt)

#define SCALE_FUNCS_MMX(opt) \
    SCALE_FUNCS(4, opt); \
    SCALE_FUNCS(8, opt); \
    SCALE_FUNCS(X, opt)

#define SCALE_FUNCS_SSE(opt) \
    SCALE_FUNCS(4, opt); \
    SCALE_FUNCS(8, opt); \
    SCALE_FUNCS(X4, opt); \
    SCALE_FUNCS(X8, opt)

#define VSCALEX_FUNC(size, opt) \
void ff_yuv2planeX_ ## size ## _ ## opt(const int16_t *filter, int filterSize, \
                                        const int16_t **src, uint8_t *dest, int dstW, \
                                        const uint8_t *dither, int offset)
#define VSCALEX_FUNCS(opt) \
    VSCALEX_FUNC(8,  opt); \
    VSCALEX_FUNC(9,  opt); \
    VSCALEX_FUNC(10, opt)

#define VSCALE_FUNC(size, opt) \
void ff_yuv2plane1_ ## size ## _ ## opt(const int16_t *src, uint8_t *dst, int dstW, \
                                        const uint8_t *dither, int offset)
#define VSCALE_FUNCS(opt1, opt2) \
    VSCALE_FUNC(8,  opt1); \
    VSCALE_FUNC(9,  opt2); \
    VSCALE_FUNC(10, opt2); \
    VSCALE_FUNC(16, opt1)

#define INPUT_Y_FUNC(fmt, opt) \
void ff_ ## fmt ## ToY_ ## opt(uint8_t *dst, const uint8_t *src, \
                               const uint8_t *unused1, const uint8_t *unused2, \
                               int w, uint32_t *unused)
#define INPUT_UV_FUNC(fmt, opt) \
void ff_ ## fmt ## ToUV_ ## opt(uint8_t *dstU, uint8_t *dstV, \
                                const uint8_t *unused0, \
                                const uint8_t *src1, \
                                const uint8_t *src2, \
                                int w, uint32_t *unused)
#define INPUT_FUNC(fmt, opt) \
    INPUT_Y_FUNC(fmt, opt); \
    INPUT_UV_FUNC(fmt, opt)
#define INPUT_FUNCS(opt) \
    INPUT_FUNC(uyvy, opt); \
    INPUT_FUNC(yuyv, opt); \
    INPUT_UV_FUNC(nv12, opt); \
    INPUT_UV_FUNC(nv21, opt); \
    INPUT_FUNC(rgba, opt); \
    INPUT_FUNC(bgra, opt); \
    INPUT_FUNC(argb, opt); \
    INPUT_FUNC(abgr, opt); \
    INPUT_FUNC(rgb24, opt); \
    INPUT_FUNC(bgr24, opt)

#define YUV2NV_DECL(fmt, opt) \
void ff_yuv2 ## fmt ## cX_ ## opt(enum AVPixelFormat format, const uint8_t *dither, \
                                  const int16_t *filter, int filterSize, \
                                  const int16_t **u, const int16_t **v, \
                                  uint8_t *dst, int dstWidth)

YUV2NV_DECL(nv12, avx2);
YUV2NV_DECL(nv21, avx2);
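These declaration macros only enumerate the assembly entry points by name; the token pasting in SCALE_FUNC is what produces identifiers such as ff_hscale8to15_4_sse2, which the ASSIGN_* macros further down select at run time. A minimal standalone illustration of one expansion (the SwsContext stub and the sse2 example are mine, not part of the header):

    #include <stdint.h>

    typedef struct SwsContext SwsContext;   /* stub; the real type lives in swscale_internal.h */

    /* Same token-pasting pattern as SCALE_FUNC above. */
    #define SCALE_FUNC(filter_n, from_bpc, to_bpc, opt) \
    void ff_hscale ## from_bpc ## to ## to_bpc ## _ ## filter_n ## _ ## opt( \
            SwsContext *c, int16_t *data, int dstW, const uint8_t *src, \
            const int16_t *filter, const int32_t *filterPos, int filterSize)

    /* SCALE_FUNC(4, 8, 15, sse2); expands to the prototype
     *   void ff_hscale8to15_4_sse2(SwsContext *c, int16_t *data, int dstW,
     *                              const uint8_t *src, const int16_t *filter,
     *                              const int32_t *filterPos, int filterSize);
     * i.e. 8-bit input, 15-bit intermediate output, 4-tap filter, SSE2 version. */
    SCALE_FUNC(4, 8, 15, sse2);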
/* excerpt: av_cold void ff_sws_init_swscale_x86(SwsContext *c) */
    /* ... */
        sws_init_swscale_mmx(c);
    /* ... */
#if HAVE_MMXEXT_INLINE
    /* ... */
        sws_init_swscale_mmxext(c);
    /* ... */

#define ASSIGN_SCALE_FUNC2(hscalefn, filtersize, opt1, opt2) do { \
    if (c->srcBpc == 8) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale8to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale8to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 9) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale9to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale9to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 10) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale10to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale10to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 12) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale12to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale12to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 14 || ((c->srcFormat==AV_PIX_FMT_PAL8||isAnyRGB(c->srcFormat)) && av_pix_fmt_desc_get(c->srcFormat)->comp[0].depth<16)) { \
        hscalefn = c->dstBpc <= 14 ? ff_hscale14to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale14to19_ ## filtersize ## _ ## opt1; \
    } else { \
        av_assert0(c->srcBpc == 16);\
        hscalefn = c->dstBpc <= 14 ? ff_hscale16to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale16to19_ ## filtersize ## _ ## opt1; \
    } \
} while (0)

#define ASSIGN_MMX_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \
    switch (filtersize) { \
    case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \
    case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \
    default: ASSIGN_SCALE_FUNC2(hscalefn, X, opt1, opt2); break; \
    }

#define ASSIGN_VSCALEX_FUNC(vscalefn, opt, do_16_case, condition_8bit) \
    switch (c->dstBpc) { \
    case 16: do_16_case; break; \
    case 10: if (!isBE(c->dstFormat) && c->dstFormat != AV_PIX_FMT_P010LE) vscalefn = ff_yuv2planeX_10_ ## opt; break; \
    case 9:  if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_9_ ## opt;  break; \
    case 8:  if ((condition_8bit) && !c->use_mmx_vfilter) vscalefn = ff_yuv2planeX_8_ ## opt; break; \
    }

#define ASSIGN_VSCALE_FUNC(vscalefn, opt1, opt2, opt2chk) \
    switch (c->dstBpc) { \
    case 16: if (!isBE(c->dstFormat)) vscalefn = ff_yuv2plane1_16_ ## opt1; break; \
    case 10: if (!isBE(c->dstFormat) && c->dstFormat != AV_PIX_FMT_P010LE && opt2chk) vscalefn = ff_yuv2plane1_10_ ## opt2; break; \
    case 9:  if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_9_ ## opt2; break; \
    case 8:  vscalefn = ff_yuv2plane1_8_ ## opt1; break; \
    default: av_assert0(c->dstBpc > 8); \
    }

#define case_rgb(x, X, opt) \
    case AV_PIX_FMT_ ## X: \
        c->lumToYV12 = ff_ ## x ## ToY_ ## opt; \
        if (!c->chrSrcHSubSample) \
            c->chrToYV12 = ff_ ## x ## ToUV_ ## opt; \
        break;

    /* ... */

#define ASSIGN_SSE_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \
    switch (filtersize) { \
    case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \
    case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \
    default: if (filtersize & 4) ASSIGN_SCALE_FUNC2(hscalefn, X4, opt1, opt2); \
             else                ASSIGN_SCALE_FUNC2(hscalefn, X8, opt1, opt2); \
             break; \
    }

    /* ... trailing condition_8bit argument of three ASSIGN_VSCALEX_FUNC(...) invocations: */
                            HAVE_ALIGNED_STACK || ARCH_X86_64);
    /* ... */
                            HAVE_ALIGNED_STACK || ARCH_X86_64);
    /* ... */
                            HAVE_ALIGNED_STACK || ARCH_X86_64);
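For reference, every hscale entry point chosen by ASSIGN_SCALE_FUNC2 performs the same basic operation as swscale's scalar fallback: for each destination pixel, read filterSize source pixels starting at filterPos[i], multiply by the 16-bit filter coefficients, and store a 15-bit (or 19-bit) intermediate sample. A hedged scalar sketch of the 8-bit to 15-bit case, modeled on the C reference rather than on the assembly (the function name is illustrative, and the SwsContext argument is omitted):

    #include <stdint.h>

    /* Illustrative scalar version of what the ff_hscale8to15_* entry points compute. */
    void hscale_8to15_sketch(int16_t *dst, int dstW, const uint8_t *src,
                             const int16_t *filter, const int32_t *filterPos,
                             int filterSize)
    {
        for (int i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];                 /* first source pixel feeding dst[i] */
            int val = 0;
            for (int j = 0; j < filterSize; j++)
                val += (int)src[srcPos + j] * filter[j];
            int out = val >> 7;                        /* drop fractional bits, keep a 15-bit sample */
            dst[i] = out > 32767 ? 32767 : out;        /* clamp overshoot, as the C reference does */
        }
    }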
#define EXTERNAL_MMX(flags)
AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
void(* hcScale)(struct SwsContext *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
int chrSrcH
Height of source chroma planes.
AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
#define VSCALE_FUNC(size, opt)
#define SCALE_FUNCS_MMX(opt)
void(* chrToYV12)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, int width, uint32_t *pal)
Unscaled conversion of chroma planes to YV12 for horizontal scaler.
void(* alpToYV12)(uint8_t *dst, const uint8_t *src, const uint8_t *src2, const uint8_t *src3, int width, uint32_t *pal)
Unscaled conversion of alpha plane to YV12 for horizontal scaler.
void(* hyScale)(struct SwsContext *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Scale one horizontal line of input data using a filter over the input lines, to produce one (differently sized) line of output data.
static atomic_int cpu_flags
int dstY
Last destination vertical line output from last slice.
#define case_rgb(x, X, opt)
attributes.h
Macro definitions for various function/variable attributes.
#define ASSIGN_SSE_SCALE_FUNC(hscalefn, filtersize, opt1, opt2)
int srcH
Height of source luma/alpha planes.
#define VSCALE_FUNCS(opt1, opt2)
#define EXTERNAL_SSE4(flags)
int chrDstVSubSample
Binary logarithm of vertical subsampling factor between luma/alpha and chroma planes in destination image.
uint8_t ** line
line buffer
int vChrFilterSize
Vertical filter size for chroma pixels.
static av_cold int end(AVCodecContext *avctx)
#define AV_CPU_FLAG_MMXEXT
SSE integer functions or AMD MMX ext.
av_cold void ff_sws_init_swscale_x86(SwsContext *c)
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
enum AVPixelFormat dstFormat
Destination pixel format.
#define EXTERNAL_SSE2(flags)
#define VSCALEX_FUNCS(opt)
int32_t * vChrFilterPos
Array of vertical filter starting positions for each dst[i] for chroma planes.
int dstH
Height of destination luma/alpha planes.
#define EXTERNAL_AVX2_FAST(flags)
#define INLINE_MMX(flags)
int hLumFilterSize
Horizontal filter size for luma/alpha pixels.
static const uint8_t dither[8][8]
AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
avassert.h
simple assert() macros that are a bit more flexible than ISO C assert().
SwsPlane plane[MAX_SLICE_PLANES]
color planes
int32_t alpMmxFilter[4 *MAX_FILTER_SIZE]
int hChrFilterSize
Horizontal filter size for chroma pixels.
#define DECLARE_ASM_CONST(n, t, v)
Declare a static constant aligned variable appropriate for use in inline assembly code.
AV_PIX_FMT_NV24
planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
AV_PIX_FMT_NV42
as above, but U and V bytes are swapped
void ff_updateMMXDitherTables(SwsContext *c, int dstY)
#define AV_CPU_FLAG_SSE3
Prescott SSE3 functions.
yuv2planar1_fn yuv2plane1
#define SCALE_FUNCS_SSE(opt)
yuv2interleavedX_fn yuv2nv12cX
#define XMM_CLOBBERS(...)
int dstW
Width of destination luma/alpha planes.
int32_t * vLumFilterPos
Array of vertical filter starting positions for each dst[i] for luma/alpha planes.
#define AV_PIX_FMT_BGR555
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
int32_t lumMmxFilter[4 *MAX_FILTER_SIZE]
const uint64_t ff_dither4[2]
#define ASSIGN_MMX_SCALE_FUNC(hscalefn, filtersize, opt1, opt2)
yuv2planarX_fn yuv2planeX
#define DECLARE_ASM_ALIGNED(n, t, v)
Declare an aligned variable appropriate for use in inline assembly code.
AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
#define EXTERNAL_SSSE3(flags)
AV_PIX_FMT_NV21
as above, but U and V bytes are swapped
int vLumFilterSize
Vertical filter size for luma/alpha pixels.
int16_t * vChrFilter
Array of vertical filter coefficients for chroma planes.
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU (see the runtime dispatch example after this cross-reference list).
#define flags(name, subs,...)
#define EXTERNAL_MMXEXT(flags)
#define VSCALEX_FUNC(size, opt)
#define INLINE_MMXEXT(flags)
__asm__(".macro parse_r var r\n\t""\\var = -1\n\t"_IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3) _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7) _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11) _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15) _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19) _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31)".iflt \\var\n\t"".error \"Unable to parse register name \\r\"\n\t"".endif\n\t"".endm")
enum AVPixelFormat srcFormat
Source pixel format.
int32_t chrMmxFilter[4 *MAX_FILTER_SIZE]
#define AV_PIX_FMT_RGB555
#define ASSIGN_VSCALEX_FUNC(vscalefn, opt, do_16_case, condition_8bit)
#define ASSIGN_VSCALE_FUNC(vscalefn, opt1, opt2, opt2chk)
uint8_t ** tmp
Tmp line buffer used by mmx code.
void(* lumToYV12)(uint8_t *dst, const uint8_t *src, const uint8_t *src2, const uint8_t *src3, int width, uint32_t *pal)
Unscaled conversion of luma plane to YV12 for horizontal scaler.
int16_t * vLumFilter
Array of vertical filter coefficients for luma/alpha planes.
int sliceY
index of first line
int flags
Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
#define EXTERNAL_AVX(flags)
const uint64_t ff_dither8[2]
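As the av_get_cpu_flags() entry above indicates, all of the SIMD function pointers in this file are chosen once at initialization from the run-time CPU flags (the EXTERNAL_*/INLINE_* macros wrap those flag tests together with the build-time HAVE_* configure results). A small standalone program against the public libavutil API shows the underlying query; the particular flags printed here are illustrative, not the exact set of checks ff_sws_init_swscale_x86() performs:

    /* build: cc cpuflags.c $(pkg-config --cflags --libs libavutil) */
    #include <stdio.h>
    #include <libavutil/cpu.h>

    int main(void)
    {
        int flags = av_get_cpu_flags();   /* same query the swscale x86 init code relies on */

        printf("MMXEXT: %s\n", (flags & AV_CPU_FLAG_MMXEXT) ? "yes" : "no");
        printf("SSE2:   %s\n", (flags & AV_CPU_FLAG_SSE2)   ? "yes" : "no");
        printf("SSE3:   %s\n", (flags & AV_CPU_FLAG_SSE3)   ? "yes" : "no");
        printf("AVX2:   %s\n", (flags & AV_CPU_FLAG_AVX2)   ? "yes" : "no");
        return 0;
    }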