  {  36,  68,  60,  92,  34,  66,  58,  90, },
  { 100,   4, 124,  28,  98,   2, 122,  26, },
  {  52,  84,  44,  76,  50,  82,  42,  74, },
  { 116,  20, 108,  12, 114,  18, 106,  10, },
  {  32,  64,  56,  88,  38,  70,  62,  94, },
  {  96,   0, 120,  24, 102,   6, 126,  30, },
  {  48,  80,  40,  72,  54,  86,  46,  78, },
  { 112,  16, 104,   8, 118,  22, 110,  14, },
  {  36,  68,  60,  92,  34,  66,  58,  90, },

    64, 64, 64, 64, 64, 64, 64, 64
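
/* The rows above are the 8x8 ordered-dither matrix ff_dither_8x8_128, followed
 * by the all-64 constant sws_pb_64.  Next, fillPlane() fills 'height' rows of
 * a byte plane with the constant 'val', one memset() per row. */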
    uint8_t *ptr = plane + stride * y;
    for (i = 0; i < height; i++) {
        memset(ptr, val, width);
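
/* The hScale*_c routines below are the generic horizontal scalers: for each
 * output pixel i they accumulate filterSize input samples starting at
 * filterPos[i], weighted by the coefficients in filter[], then shift the sum
 * down and clip it to the 15-bit or 19-bit intermediate range. */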
                           const int32_t *filterPos, int filterSize)
    const uint16_t *src = (const uint16_t *) _src;
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        dst[i] = FFMIN(val >> sh, (1 << 19) - 1);

                           const int32_t *filterPos, int filterSize)
    const uint16_t *src = (const uint16_t *) _src;
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        dst[i] = FFMIN(val >> sh, (1 << 15) - 1);

                          const int32_t *filterPos, int filterSize)
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        for (j = 0; j < filterSize; j++) {
            val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
        dst[i] = FFMIN(val >> 7, (1 << 15) - 1);

                          const int32_t *filterPos, int filterSize)
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        for (j = 0; j < filterSize; j++) {
            val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
        dst[i] = FFMIN(val >> 3, (1 << 19) - 1);
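
/* Range-conversion helpers.  The *ToJpeg_c functions expand limited (MPEG)
 * range to full (JPEG) range, the *FromJpeg_c functions compress it back,
 * operating on the 15-bit intermediate format (8-bit values scaled by 1 << 7):
 * e.g. lumRangeFromJpeg_c maps 0 to 33561947 >> 14 = 2048 (16 << 7) and
 * 255 << 7 to 30080 (235 << 7). */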
    for (i = 0; i < width; i++) {
        dstU[i] = (FFMIN(dstU[i], 30775) * 4663 - 9289992) >> 12;
        dstV[i] = (FFMIN(dstV[i], 30775) * 4663 - 9289992) >> 12;

    for (i = 0; i < width; i++) {
        dstU[i] = (dstU[i] * 1799 + 4081085) >> 11;
        dstV[i] = (dstV[i] * 1799 + 4081085) >> 11;

    for (i = 0; i < width; i++)
        dst[i] = (FFMIN(dst[i], 30189) * 19077 - 39057361) >> 14;

    for (i = 0; i < width; i++)
        dst[i] = (dst[i] * 14071 + 33561947) >> 14;

    for (i = 0; i < width; i++) {
        dstU[i] = (FFMIN(dstU[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12;
        dstV[i] = (FFMIN(dstV[i], 30775 << 4) * 4663 - (9289992 << 4)) >> 12;

    for (i = 0; i < width; i++) {
        dstU[i] = (dstU[i] * 1799 + (4081085 << 4)) >> 11;
        dstV[i] = (dstV[i] * 1799 + (4081085 << 4)) >> 11;

    for (i = 0; i < width; i++) {
        dst[i] = ((int)(FFMIN(dst[i], 30189 << 4) * 4769U - (39057361 << 2))) >> 12;

    for (i = 0; i < width; i++)
        dst[i] = (dst[i] * (14071 / 4) + (33561947 << 4) / 4) >> 12;
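
/* The *16_c variants above apply the same conversions to the higher-bit-depth
 * (int32_t) intermediates; the constants are pre-scaled by 16 (<< 4) to match.
 * DEBUG_BUFFERS() below expands to an av_log() call only when
 * DEBUG_SWSCALE_BUFFERS is set to a non-zero value. */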
#define DEBUG_SWSCALE_BUFFERS 0
#define DEBUG_BUFFERS(...)                      \
    if (DEBUG_SWSCALE_BUFFERS)                  \
        av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
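
/* static swscale(): the generic planar scaler.  It walks the requested source
 * slice, horizontally scales the luma/alpha and chroma lines it needs into
 * ring buffers (tracked by lumBufIndex/chrBufIndex and lastInLumBuf/
 * lastInChrBuf), and vertically filters them into one destination line per
 * iteration of the dstY loop. */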
                   int srcStride[], int srcSliceY,
                   int srcSliceH, uint8_t *dst[], int dstStride[])
    const int dstW = c->dstW;
    const int dstH = c->dstH;
    int chrStart = lumEnd;
    srcStride[3] = srcStride[0];
    DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
                  src[0], srcStride[0], src[1], srcStride[1],
                  src[2], srcStride[2], src[3], srcStride[3],
                  dst[0], dstStride[0], dst[1], dstStride[1],
                  dst[2], dstStride[2], dst[3], dstStride[3]);
    DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
                  srcSliceY, srcSliceH, dstY, dstH);
                  vLumFilterSize, vChrFilterSize);
    if (dstStride[0]&15 || dstStride[1]&15 ||
        dstStride[2]&15 || dstStride[3]&15) {
        static int warnedAlready = 0;
               "Warning: dstStride is not aligned!\n"
               "         ->cannot do aligned memory accesses anymore\n");
    if (   (uintptr_t)dst[0]&15 || (uintptr_t)dst[1]&15 || (uintptr_t)dst[2]&15
        || (uintptr_t)src[0]&15 || (uintptr_t)src[1]&15 || (uintptr_t)src[2]&15
        || dstStride[0]&15 || dstStride[1]&15 || dstStride[2]&15 || dstStride[3]&15
        || srcStride[0]&15 || srcStride[1]&15 || srcStride[2]&15 || srcStride[3]&15
        static int warnedAlready=0;
    if (srcSliceY == 0) {
    if (!should_dither) {
                           srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);
    if (srcSliceY == 0) {
        hout_slice->width = dstW;
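
    /* Per-destination-line loop: for each dstY, work out which source lines
     * the vertical luma and chroma filters need (firstLumSrcY..lastLumSrcY,
     * firstChrSrcY..lastChrSrcY). */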
    for (; dstY < dstH; dstY++) {
        const int firstLumSrcY  = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
        const int firstChrSrcY  = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);
        int lastLumSrcY  = FFMIN(c->srcH,    firstLumSrcY  + vLumFilterSize) - 1;
        int lastLumSrcY2 = FFMIN(c->srcH,    firstLumSrcY2 + vLumFilterSize) - 1;
        int lastChrSrcY  = FFMIN(c->chrSrcH, firstChrSrcY  + vChrFilterSize) - 1;
        int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;
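
        /* If the lines we need start beyond what is already buffered (or the
         * buffered range has holes), reset the ring-buffer bookkeeping so the
         * missing lines are (re)scaled below. */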
        if (firstLumSrcY > lastInLumBuf) {
            hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
            lastInLumBuf = firstLumSrcY - 1;
        if (firstChrSrcY > lastInChrBuf) {
            hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
            lastInChrBuf = firstChrSrcY - 1;
        DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
                      firstLumSrcY, lastLumSrcY, lastInLumBuf);
        DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
                      firstChrSrcY, lastChrSrcY, lastInChrBuf);
        enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
            lastLumSrcY = srcSliceY    + srcSliceH    - 1;
            lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
            DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
                          lastLumSrcY, lastChrSrcY);
        if (posY <= lastLumSrcY && !hasLumHoles) {
            firstPosY = FFMAX(firstLumSrcY, posY);
            lastPosY = lastLumSrcY;
        if (cPosY <= lastChrSrcY && !hasChrHoles) {
            firstCPosY = FFMAX(firstChrSrcY, cPosY);
            lastCPosY = lastChrSrcY;
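
        /* Horizontally scale the newly required luma and chroma lines by
         * running the slice filter descriptors, then advance the ring-buffer
         * indices; they wrap by subtracting the vertical filter size. */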
        if (posY < lastLumSrcY + 1) {
            for (i = lumStart; i < lumEnd; ++i)
                desc[i].process(c, &desc[i], firstPosY, lastPosY - firstPosY + 1);
        lumBufIndex += lastLumSrcY - lastInLumBuf;
        lastInLumBuf = lastLumSrcY;
        if (cPosY < lastChrSrcY + 1) {
            for (i = chrStart; i < chrEnd; ++i)
                desc[i].process(c, &desc[i], firstCPosY, lastCPosY - firstCPosY + 1);
        chrBufIndex += lastChrSrcY - lastInChrBuf;
        lastInChrBuf = lastChrSrcY;
        if (lumBufIndex >= vLumFilterSize)
            lumBufIndex -= vLumFilterSize;
        if (chrBufIndex >= vChrFilterSize)
            chrBufIndex -= vChrFilterSize;
                                 lastInLumBuf, lastInChrBuf);
        if (dstY >= dstH - 2) {
                                     &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX);
                               yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, use_mmx_vfilter);
            for (i = vStart; i < vEnd; ++i)
                desc[i].process(c, &desc[i], dstY, 1);
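
        /* For the last couple of output lines the output functions are
         * re-selected (apparently so the MMX vertical filter cannot write past
         * the end of its buffers); the vertical descriptors then emit one
         * destination line per dstY. */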
        int height = dstY - lastDstY;
            fillPlane16(dst[3], dstStride[3], length, height, lastDstY,
            fillPlane(dst[3], dstStride[3], length, height, lastDstY, 255);
#if HAVE_MMXEXT_INLINE
        __asm__ volatile ("sfence" ::: "memory");
    return dstY - lastDstY;

        src[3] = src[2] = NULL;

                                 const int linesizes[4])
    for (i = 0; i < 4; i++) {
        if (!data[plane] || !linesizes[plane])
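
/* check_image_pointers() above verifies that every plane used by the pixel
 * format has both a data pointer and a linesize.  Judging by what gets
 * clipped, the first loop below belongs to xyz12Torgb48() (r/g/b clipped to
 * 12 bits) and the second to rgb48Toxyz12() (x/y/z clipped to 12 bits); both
 * walk packed 3-component rows using the 3x4 conversion matrices stored in
 * the context. */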
    for (yp=0; yp<h; yp++) {
        for (xp=0; xp+2<stride; xp+=3) {
            int x, y, z, r, g, b;
            r = av_clip_uintp2(r, 12);
            g = av_clip_uintp2(g, 12);
            b = av_clip_uintp2(b, 12);

    for (yp=0; yp<h; yp++) {
        for (xp=0; xp+2<stride; xp+=3) {
            int x, y, z, r, g, b;
            x = av_clip_uintp2(x, 12);
            y = av_clip_uintp2(y, 12);
            z = av_clip_uintp2(z, 12);
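
/* sws_scale(): the public entry point (the SwsContext itself is not exported).
 * It validates the call, copies the plane pointers and strides into local
 * arrays, rebuilds the palette lookup tables for paletted input, optionally
 * fills in an opaque alpha channel or converts XYZ input to RGB48 in a
 * temporary buffer, handles bottom-up slices, and finally invokes
 * c->swscale(). */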
                                  const uint8_t *const srcSlice[],
                                  const int srcStride[], int srcSliceY,
                                  int srcSliceH, uint8_t *const dst[],
                                  const int dstStride[])
    int srcSliceY_internal = srcSliceY;
    if (!srcStride || !dstStride || !dst || !srcSlice) {
        av_log(c, AV_LOG_ERROR, "One of the input parameters to sws_scale() is NULL, please check the calling code\n");
    for (i=0; i<4; i++) {
        srcStride2[i] = srcStride[i];
        dstStride2[i] = dstStride[i];
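
    /* Slices must line up with the chroma subsampling: srcSliceY and
     * srcSliceH have to be multiples of the macro height unless the slice
     * reaches the bottom of the image. */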
    if ((srcSliceY & (macro_height-1)) ||
        ((srcSliceH & (macro_height-1)) && srcSliceY + srcSliceH != c->srcH) ||
        srcSliceY + srcSliceH > c->srcH) {
        av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", srcSliceY, srcSliceH);
                            srcSlice, srcStride, srcSliceY, srcSliceH,
                            srcSlice, srcStride, srcSliceY, srcSliceH,
    memcpy(src2, srcSlice, sizeof(src2));
    memcpy(dst2, dst, sizeof(dst2));
    if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
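
    /* Palette handling: for PAL8 and the small packed RGB formats a 256-entry
     * lookup table is built, either from the supplied palette (srcSlice[1]) or
     * synthesized from the pixel index, and stored both as YUV (pal_yuv) and
     * as RGB (pal_rgb) using the BT.601-derived coefficients defined below. */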
        for (i = 0; i < 256; i++) {
            int r, g, b, y, u, v, a = 0xff;
                uint32_t p = ((const uint32_t *)(srcSlice[1]))[i];
                a = (p >> 24) & 0xFF;
                r = (p >> 16) & 0xFF;
                g = ((i >> 2) & 7) * 36;
                g = ((i >> 3) & 7) * 36;
                r = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
                b = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
#define RGB2YUV_SHIFT 15
#define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
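
/* The macros above convert full-range 8-bit RGB to limited-range YUV in
 * 15-bit fixed point: luma coefficients are scaled by 219/255, chroma by
 * 224/255, e.g. BY evaluates to (int)(0.114 * 219 / 255 * 32768 + 0.5) = 3208. */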
            c->pal_yuv[i]= y + (u<<8) + (v<<16) + ((unsigned)a<<24);
                c->pal_rgb[i]= r + (g<<8) + (b<<16) + ((unsigned)a<<24);
                c->pal_rgb[i]= a + (r<<8) + (g<<16) + ((unsigned)b<<24);
                c->pal_rgb[i]= a + (b<<8) + (g<<16) + ((unsigned)r<<24);
                c->pal_rgb[i]= b + (g<<8) + (r<<16) + ((unsigned)a<<24);
        base = srcStride[0] < 0 ? rgb0_tmp - srcStride[0] * (srcSliceH-1) : rgb0_tmp;
        for (y=0; y<srcSliceH; y++){
            memcpy(base + srcStride[0]*y, src2[0] + srcStride[0]*y, 4*c->srcW);
            for (x=c->src0Alpha-1; x<4*c->srcW; x+=4) {
                base[ srcStride[0]*y + x] = 0xFF;

        base = srcStride[0] < 0 ? rgb0_tmp - srcStride[0] * (srcSliceH-1) : rgb0_tmp;
        xyz12Torgb48(c, (uint16_t*)base, (const uint16_t*)src2[0], srcStride[0]/2, srcSliceH);
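
    /* XYZ input is first converted to RGB48 into a temporary buffer
     * (xyz12Torgb48 above).  The lines below handle bottom-up sources by
     * pointing src2/dst2 at the last row and recomputing the internal slice
     * position as srcH - srcSliceY - srcSliceH. */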
        for (i = 0; i < 4; i++)
        for (i=0; i<4; i++) {
        src2[0] += (srcSliceH - 1) * srcStride[0];
        src2[3] += (srcSliceH - 1) * srcStride[3];
        dst2[0] += ( c->dstH - 1) * dstStride[0];
        dst2[3] += ( c->dstH - 1) * dstStride[3];
        srcSliceY_internal = c->srcH-srcSliceY-srcSliceH;
    if (srcSliceY_internal + srcSliceH == c->srcH)
    ret = c->swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH, dst2, dstStride2);
        int dstY = c->dstY ? c->dstY : srcSliceY + srcSliceH;
        uint16_t *dst16 = (uint16_t*)(dst2[0] + (dstY - ret) * dstStride2[0]);