Source listing (fragmentary) of FFmpeg's libswscale/swscale.c. Elided lines are marked /* ... */.
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_128)[9][8] = {
    {  36,  68,  60,  92,  34,  66,  58,  90, },
    { 100,   4, 124,  28,  98,   2, 122,  26, },
    {  52,  84,  44,  76,  50,  82,  42,  74, },
    { 116,  20, 108,  12, 114,  18, 106,  10, },
    {  32,  64,  56,  88,  38,  70,  62,  94, },
    {  96,   0, 120,  24, 102,   6, 126,  30, },
    {  48,  80,  40,  72,  54,  86,  46,  78, },
    { 112,  16, 104,   8, 118,  22, 110,  14, },
    {  36,  68,  60,  92,  34,  66,  58,  90, },
};

static const uint8_t sws_pb_64[8] = {
    64, 64, 64, 64, 64, 64, 64, 64
};
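ff_dither_8x8_128 is a 9x8 ordered-dither matrix (values 0..126, with the first row duplicated so rows can be addressed with a sliding offset); sws_pb_64 is a bias constant used by SIMD code. The following standalone sketch shows the usual way such a matrix is applied when truncating samples to fewer bits; the function name and exact scaling are illustrative assumptions, not swscale's actual dither kernel:

#include <stdint.h>

/* Hypothetical sketch: apply an 8x8 ordered-dither table while
 * truncating 8-bit samples to out_bits. Table values span 0..127,
 * so they are rescaled to the size of the quantization step. */
static uint8_t dither_down(uint8_t v, int out_bits, int x, int y,
                           const uint8_t table[8][8])
{
    int shift = 8 - out_bits;                        /* bits discarded */
    int noise = table[y & 7][x & 7] >> (7 - shift);  /* 0 .. step-1    */
    int t = v + noise;
    if (t > 255)
        t = 255;                      /* saturate before truncation */
    return (uint8_t)(t >> shift);
}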
static av_always_inline void fillPlane(uint8_t *plane, int stride, int width,
                                       int height, int y, uint8_t val)
{
    uint8_t *ptr = plane + stride * y;
    /* ... */
}
static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    /* ... */
    const uint16_t *src = (const uint16_t *) _src;
    /* ... */
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        /* ... */
        for (j = 0; j < filterSize; j++) {
            /* ... */
static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    /* ... */
    const uint16_t *src = (const uint16_t *) _src;
    int sh = desc->comp[0].depth - 1;
    /* ... */
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        /* ... */
        for (j = 0; j < filterSize; j++) {
            /* ... */
static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW,
                          const uint8_t *src, const int16_t *filter,
                          const int32_t *filterPos, int filterSize)
{
    /* ... */
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        /* ... */
        for (j = 0; j < filterSize; j++) {
            /* ... */
static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW,
                          const uint8_t *src, const int16_t *filter,
                          const int32_t *filterPos, int filterSize)
{
    /* ... */
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        /* ... */
        for (j = 0; j < filterSize; j++) {
            /* ... */
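All four hScale*_c variants above share one pattern: output sample i is the dot product of filterSize input samples starting at filterPos[i] with row i of the coefficient array, shifted down into swscale's 15- or 19-bit intermediate format and clamped against coefficient overshoot (bicubic lobes can exceed 1.0). A self-contained sketch of the 8-bit-to-15-bit case, reconstructed from the fragments above; treat the details as assumptions:

#include <stdint.h>

/* Sketch of the horizontal-scale inner loop: one FIR dot product per
 * output sample, with the 8-bit input and Q7-headroom coefficients
 * yielding a 15-bit result after >> 7. */
static void hscale_8to15_sketch(int16_t *dst, int dstW, const uint8_t *src,
                                const int16_t *filter,
                                const int32_t *filterPos, int filterSize)
{
    for (int i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];
        int val = 0;
        for (int j = 0; j < filterSize; j++)
            val += (int)src[srcPos + j] * filter[filterSize * i + j];
        val >>= 7;
        dst[i] = val > (1 << 15) - 1 ? (1 << 15) - 1 : val; /* overshoot clamp */
    }
}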
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width,
                             uint32_t _coeff, int64_t _offset)
{
    /* ... */
    uint16_t coeff = _coeff;
    /* ... */
        dstU[i] = FFMIN(U, (1 << 15) - 1);
        dstV[i] = FFMIN(V, (1 << 15) - 1);
    /* ... */
}
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width,
                               uint32_t _coeff, int64_t _offset)
{
    /* ... */
    uint16_t coeff = _coeff;
    /* ... */
}

static void lumRangeToJpeg_c(int16_t *dst, int width,
                             uint32_t _coeff, int64_t _offset)
{
    /* ... */
    uint16_t coeff = _coeff;
    /* ... */
}

static void lumRangeFromJpeg_c(int16_t *dst, int width,
                               uint32_t _coeff, int64_t _offset)
{
    /* ... */
    uint16_t coeff = _coeff;
    /* ... */
}

/* The 16-bit variants follow the same pattern, clamped to the 19-bit
 * intermediate range; from chrRangeToJpeg16_c: */
        dstU[i] = FFMIN(U, (1 << 19) - 1);
        dstV[i] = FFMIN(V, (1 << 19) - 1);
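The range-conversion kernels (limited/MPEG range <-> full/JPEG range) all apply one affine fixed-point map per sample; only the shift and the clamp differ between the 15-bit and 19-bit paths. A hedged sketch of the pattern, assuming the 15-bit path's shift of 14:

#include <stdint.h>

/* Sketch: dst = (src * coeff + offset) >> 14, clamped to the 15-bit
 * intermediate range. coeff/offset come from solve_range_convert()
 * further down; the shift value here is an assumption for the
 * dstBpc <= 14 path. */
static void range_convert_sketch(int16_t *dst, int width,
                                 uint32_t coeff, int64_t offset)
{
    for (int i = 0; i < width; i++) {
        int val = (int)((dst[i] * (int64_t)coeff + offset) >> 14);
        dst[i] = val > (1 << 15) - 1 ? (1 << 15) - 1 : val;
    }
}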
#define DEBUG_SWSCALE_BUFFERS 0
#define DEBUG_BUFFERS(...)                      \
    if (DEBUG_SWSCALE_BUFFERS)                  \
        av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
int ff_swscale(SwsInternal *c, const uint8_t *const src[],
               const int srcStride[], int srcSliceY, int srcSliceH,
               uint8_t *const dst[], const int dstStride[],
               int dstSliceY, int dstSliceH)
{
    const int scale_dst = dstSliceY > 0 || dstSliceH < c->opts.dst_h;
    /* ... */
    const int dstW = c->opts.dst_w;
    int dstH = c->opts.dst_h;
    int dstY = c->dstY;
    /* ... */
    const int flags = c->opts.flags;
    int32_t *vLumFilterPos = c->vLumFilterPos;
    int32_t *vChrFilterPos = c->vChrFilterPos;
    /* ... */
    const int vLumFilterSize = c->vLumFilterSize;
    const int vChrFilterSize = c->vChrFilterSize;
    /* ... */
    const int chrSrcSliceY = srcSliceY >> c->chrSrcVSubSample;
    const int chrSrcSliceH = AV_CEIL_RSHIFT(srcSliceH, c->chrSrcVSubSample);
    int should_dither = isNBPS(c->opts.src_format) || /* ... */;
    /* ... */
    int lastInLumBuf = c->lastInLumBuf;
    int lastInChrBuf = c->lastInChrBuf;
    /* ... */
    int lumStart = 0;
    int lumEnd = c->descIndex[0];
    int chrStart = lumEnd;
    int chrEnd = c->descIndex[1];
    int vStart = chrEnd;
    int vEnd = c->numDesc;
    SwsSlice *src_slice  = &c->slice[lumStart];
    SwsSlice *hout_slice = &c->slice[c->numSlice - 2];
    SwsSlice *vout_slice = &c->slice[c->numSlice - 1];
    /* ... */
    int needAlpha = c->needAlpha;
    /* ... */
    const uint8_t *src2[4];
    int srcStride2[4];
    /* ... */
        srcStride2[3] = srcStride[0];
    /* ... */
        memcpy(srcStride2, srcStride, sizeof(srcStride2));
    /* ... */
    srcStride2[1] *= 1 << c->vChrDrop;
    srcStride2[2] *= 1 << c->vChrDrop;

    DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
                  src2[0], srcStride2[0], src2[1], srcStride2[1],
                  src2[2], srcStride2[2], src2[3], srcStride2[3],
                  dst[0], dstStride[0], dst[1], dstStride[1],
                  dst[2], dstStride[2], dst[3], dstStride[3]);
    DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
                  srcSliceY, srcSliceH, dstY, dstH);
    DEBUG_BUFFERS("vLumFilterSize: %d vChrFilterSize: %d\n",
                  vLumFilterSize, vChrFilterSize);
    if (dstStride[0]&15 || dstStride[1]&15 ||
        dstStride[2]&15 || dstStride[3]&15) {
        /* ... */
        av_log(c, AV_LOG_WARNING,
               "Warning: dstStride is not aligned!\n"
               "         ->cannot do aligned memory accesses anymore\n");
        /* ... */
    }

    if (   (uintptr_t)dst[0]&15  || (uintptr_t)dst[1]&15  || (uintptr_t)dst[2]&15
        || (uintptr_t)src2[0]&15 || (uintptr_t)src2[1]&15 || (uintptr_t)src2[2]&15
        || dstStride[0]&15  || dstStride[1]&15  || dstStride[2]&15  || dstStride[3]&15
        || srcStride2[0]&15 || srcStride2[1]&15 || srcStride2[2]&15 || srcStride2[3]&15) {
        /* ... */
    }

    /* ... */
        dstH = dstY + dstSliceH;
        /* ... */
    } else if (srcSliceY == 0) {
        /* ... */
    }

    if (!should_dither) {
        /* ... */
    }

    /* elided call tails from this region: */
    /* ... yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, c->use_mmx_vfilter); */
    /* ... srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1); */
    /* ... dstY, dstSliceH, dstY >> c->chrDstVSubSample, ... */

    if (srcSliceY == 0) {
        /* ... */
    }
    /* ... */
    hout_slice->width = dstW;
    for (; dstY < dstH; dstY++) {
        const int chrDstY = dstY >> c->chrDstVSubSample;
        int use_mmx_vfilter = c->use_mmx_vfilter;
        /* ... */
        const int firstLumSrcY  = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
        const int firstLumSrcY2 = FFMAX(1 - vLumFilterSize,
                                        vLumFilterPos[FFMIN(dstY | ((1 << c->chrDstVSubSample) - 1),
                                                            c->opts.dst_h - 1)]);
        /* ... */
        const int firstChrSrcY  = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);
        /* ... */
        int lastLumSrcY  = FFMIN(c->opts.src_h, firstLumSrcY  + vLumFilterSize) - 1;
        int lastLumSrcY2 = FFMIN(c->opts.src_h, firstLumSrcY2 + vLumFilterSize) - 1;
        int lastChrSrcY  = FFMIN(c->chrSrcH,    firstChrSrcY  + vChrFilterSize) - 1;
        /* ... */
        int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;

        /* ... */
        if (firstLumSrcY > lastInLumBuf) {
            /* ... */
            hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
            /* ... */
            lastInLumBuf = firstLumSrcY - 1;
        }
        if (firstChrSrcY > lastInChrBuf) {
            /* ... */
            hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
            /* ... */
            lastInChrBuf = firstChrSrcY - 1;
        }
        /* ... */
        DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
                      firstLumSrcY, lastLumSrcY, lastInLumBuf);
        DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
                      firstChrSrcY, lastChrSrcY, lastInChrBuf);

        /* ... */
        enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
                       lastChrSrcY < AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample);

        if (!enough_lines) {
            lastLumSrcY = srcSliceY + srcSliceH - 1;
            lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
            DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
                          lastLumSrcY, lastChrSrcY);
            /* ... */
        }
        /* ... */
        if (posY <= lastLumSrcY && !hasLumHoles) {
            firstPosY = FFMAX(firstLumSrcY, posY);
            /* ... */
            lastPosY = lastLumSrcY;
        }
        /* ... */
        if (cPosY <= lastChrSrcY && !hasChrHoles) {
            firstCPosY = FFMAX(firstChrSrcY, cPosY);
            /* ... */
            lastCPosY = lastChrSrcY;
        }
        /* ... */
        if (posY < lastLumSrcY + 1) {
            for (i = lumStart; i < lumEnd; ++i)
                /* ... */
            lastInLumBuf = lastLumSrcY;
        }
        /* ... */
        if (cPosY < lastChrSrcY + 1) {
            for (i = chrStart; i < chrEnd; ++i)
                /* ... */
            lastInChrBuf = lastChrSrcY;
        }
        /* ... */
        c->dstW_mmx = c->opts.dst_w;
        /* ... */
        if (dstY >= c->opts.dst_h - 2) {
            /* ... &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX); */
            /* ... yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, use_mmx_vfilter); */
        }
        /* ... */
        for (i = vStart; i < vEnd; ++i)
            /* ... */
    }

    /* ... */
        int offset = lastDstY - dstSliceY;
        /* ... */
        int height = dstY - lastDstY;
        /* ... */
            /* ... 1, desc->comp[3].depth, ... */
        } else if (is32BPS(dstFormat)) {
            /* ... 1, desc->comp[3].depth, ... */
        }

#if HAVE_MMXEXT_INLINE
    /* ... */
        __asm__ volatile ("sfence" ::: "memory");
#endif

    /* ... */
    c->lastInLumBuf = lastInLumBuf;
    c->lastInChrBuf = lastInChrBuf;

    return dstY - lastDstY;
}
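The heart of the loop above is window bookkeeping: output line dstY needs vLumFilterSize consecutive input lines starting at vLumFilterPos[dstY], and ff_swscale can only emit dstY once the current input slice covers that whole window (otherwise it just buffers lines). A minimal illustration of that test, reusing the same variable names (the helper name and the simplifications are mine):

#include <stdint.h>

/* Illustration only: can output line dstY be produced from the input
 * slice [srcSliceY, srcSliceY + srcSliceH)? */
static int window_available(int dstY, const int32_t *vLumFilterPos,
                            int vLumFilterSize, int src_h,
                            int srcSliceY, int srcSliceH)
{
    int first = vLumFilterPos[dstY];
    int last  = first + vLumFilterSize - 1;
    if (last >= src_h)
        last = src_h - 1;                 /* clip at the bottom edge */
    return last < srcSliceY + srcSliceH;  /* whole window buffered?  */
}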
static void solve_range_convert(uint16_t src_min, uint16_t src_max,
                                uint16_t dst_min, uint16_t dst_max,
                                int src_bits, int src_shift, int mult_shift,
                                uint32_t *coeff, int64_t *offset)
{
    uint16_t src_range = src_max - src_min;
    uint16_t dst_range = dst_max - dst_min;
    int total_shift = mult_shift + src_shift;
    /* ... the offset computation ends with the rounding term: */
        /* ... */ (1U << (mult_shift - 1));
}
static void init_range_convert_constants(SwsInternal *c)
{
    const int bit_depth  = /* ... */;
    const int src_bits   = bit_depth <= 14 ? 15 : 19;
    const int src_shift  = src_bits - bit_depth;
    const int mult_shift = bit_depth <= 14 ? 14 : 18;
    const uint16_t mpeg_min     =  16U << (bit_depth - 8);
    const uint16_t mpeg_max_lum = 235U << (bit_depth - 8);
    const uint16_t mpeg_max_chr = 240U << (bit_depth - 8);
    const uint16_t jpeg_max     = (1U << bit_depth) - 1;
    uint16_t src_min, src_max_lum, src_max_chr;
    uint16_t dst_min, dst_max_lum, dst_max_chr;
    if (c->opts.src_range) {
        /* ... */
        src_max_lum = jpeg_max;
        src_max_chr = jpeg_max;
        /* ... */
        dst_max_lum = mpeg_max_lum;
        dst_max_chr = mpeg_max_chr;
    } else {
        /* ... */
        src_max_lum = mpeg_max_lum;
        src_max_chr = mpeg_max_chr;
        /* ... */
        dst_max_lum = jpeg_max;
        dst_max_chr = jpeg_max;
    }
    solve_range_convert(src_min, src_max_lum, dst_min, dst_max_lum,
                        src_bits, src_shift, mult_shift,
                        &c->lumConvertRange_coeff, &c->lumConvertRange_offset);
    solve_range_convert(src_min, src_max_chr, dst_min, dst_max_chr,
                        src_bits, src_shift, mult_shift,
                        &c->chrConvertRange_coeff, &c->chrConvertRange_offset);
}
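In other words, solve_range_convert() turns a pair of sample ranges into a single multiply-add: dst = (x * coeff + offset) >> mult_shift, applied to samples already pre-scaled by << src_shift into the 15/19-bit internal format. A simplified solver under that reading (the exact rounding of the real code may differ):

#include <stdint.h>

/* Simplified sketch of the solver. Maps [src_min, src_max] onto
 * [dst_min, dst_max]; rounding details are assumptions. */
static void solve_range_sketch(uint16_t src_min, uint16_t src_max,
                               uint16_t dst_min, uint16_t dst_max,
                               int src_shift, int mult_shift,
                               uint32_t *coeff, int64_t *offset)
{
    uint16_t src_range = src_max - src_min;
    uint16_t dst_range = dst_max - dst_min;
    *coeff  = (uint32_t)(((uint64_t)dst_range << mult_shift) / src_range);
    *offset = ((int64_t)dst_min << (mult_shift + src_shift))
            - (((int64_t)src_min << src_shift) * *coeff)
            + (1 << (mult_shift - 1));   /* rounding before the shift */
}

Worked example: for 8-bit limited-to-full luma (16..235 -> 0..255, src_shift 7, mult_shift 14) this yields coeff = (255 << 14) / 219 = 19077, matching swscale's historical range-conversion constant.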
av_cold void ff_sws_init_range_convert(SwsInternal *c)
{
    c->lumConvertRange = NULL;
    c->chrConvertRange = NULL;
    if (c->opts.src_range != c->opts.dst_range &&
        !isAnyRGB(c->opts.dst_format) && c->dstBpc < 32) {
        /* ... */
        if (c->dstBpc <= 14) {
            if (c->opts.src_range) {
                /* ... */
            }
            /* ... */
        } else {
            if (c->opts.src_range) {
                /* ... */
            }
            /* ... */
        }
    }
    /* ... per-arch dispatch: */
#elif ARCH_LOONGARCH64
    /* ... */
void ff_sws_init_scale(SwsInternal *c)
{
    ff_sws_init_output_funcs(c, &c->yuv2plane1, &c->yuv2planeX,
                             &c->yuv2nv12cX, &c->yuv2packed1,
                             &c->yuv2packed2, &c->yuv2packedX, &c->yuv2anyX);
    ff_sws_init_input_funcs(c, &c->lumToYV12, &c->alpToYV12, &c->chrToYV12,
                            &c->readLumPlanar, &c->readAlpPlanar, &c->readChrPlanar);
    /* ... */
    if (c->srcBpc == 8) {
        if (c->dstBpc <= 14) {
            /* ... */
        }
        /* ... */
    }
    /* ... */
        c->needs_hcscale = 1;
    /* ... per-arch dispatch: */
#elif ARCH_LOONGARCH64
    /* ... */
static int check_image_pointers(const uint8_t *const data[4],
                                enum AVPixelFormat pix_fmt,
                                const int linesizes[4])
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    /* ... */
    for (i = 0; i < 4; i++) {
        int plane = desc->comp[i].plane;
        if (!data[plane] || !linesizes[plane])
            return 0;
    }
    /* ... */
}
static void xyz12Torgb48_c(const SwsInternal *c, uint8_t *dst, int dst_stride,
                           const uint8_t *src, int src_stride, int w, int h)
{
    /* ... */
    for (int yp = 0; yp < h; yp++) {
        const uint16_t *src16 = (const uint16_t *) src;
        uint16_t *dst16 = (uint16_t *) dst;

        for (int xp = 0; xp < 3 * w; xp += 3) {
            int x, y, z, r, g, b;
            /* ... */
            x = c->xyz2rgb.gamma.in[x >> 4];
            y = c->xyz2rgb.gamma.in[y >> 4];
            z = c->xyz2rgb.gamma.in[z >> 4];
            /* ... */
            r = c->xyz2rgb.mat[0][0] * x +
                c->xyz2rgb.mat[0][1] * y +
                c->xyz2rgb.mat[0][2] * z >> 12;
            g = c->xyz2rgb.mat[1][0] * x +
                c->xyz2rgb.mat[1][1] * y +
                c->xyz2rgb.mat[1][2] * z >> 12;
            b = c->xyz2rgb.mat[2][0] * x +
                c->xyz2rgb.mat[2][1] * y +
                c->xyz2rgb.mat[2][2] * z >> 12;
            /* ... */
                AV_WB16(dst16 + xp + 0, c->xyz2rgb.gamma.out[r] << 4);
                AV_WB16(dst16 + xp + 1, c->xyz2rgb.gamma.out[g] << 4);
                AV_WB16(dst16 + xp + 2, c->xyz2rgb.gamma.out[b] << 4);
            /* ... */
                AV_WL16(dst16 + xp + 0, c->xyz2rgb.gamma.out[r] << 4);
                AV_WL16(dst16 + xp + 1, c->xyz2rgb.gamma.out[g] << 4);
                AV_WL16(dst16 + xp + 2, c->xyz2rgb.gamma.out[b] << 4);
            /* ... */
        }
        /* ... */
    }
}
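Per pixel, the conversion is: linearize each 12-bit XYZ component through an input-gamma LUT, apply a Q12 fixed-point 3x3 matrix (hence the >> 12), then re-encode through the output-gamma LUT and shift back into the 16-bit container. A standalone sketch of that step; mat and the LUT pointers stand in for c->xyz2rgb, and the clamping is an assumption:

#include <stdint.h>

/* Sketch of one XYZ -> RGB pixel. gamma_in/gamma_out are assumed
 * 4096-entry 12-bit LUTs; mat is a Q12 3x3 matrix. */
static void xyz_to_rgb_px(const int16_t mat[3][3],
                          const uint16_t *gamma_in,
                          const uint16_t *gamma_out,
                          const uint16_t xyz[3], uint16_t rgb[3])
{
    int x = gamma_in[xyz[0] >> 4];   /* 16-bit container -> 12-bit index */
    int y = gamma_in[xyz[1] >> 4];
    int z = gamma_in[xyz[2] >> 4];
    for (int i = 0; i < 3; i++) {
        int v = (mat[i][0] * x + mat[i][1] * y + mat[i][2] * z) >> 12;
        if (v < 0)    v = 0;         /* keep within the LUT's domain */
        if (v > 4095) v = 4095;
        rgb[i] = gamma_out[v] << 4;  /* back to the 16-bit container */
    }
}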
static void rgb48Toxyz12_c(const SwsInternal *c, uint8_t *dst, int dst_stride,
                           const uint8_t *src, int src_stride, int w, int h)
{
    /* ... */
    for (int yp = 0; yp < h; yp++) {
        uint16_t *src16 = (uint16_t *) src;
        uint16_t *dst16 = (uint16_t *) dst;

        for (int xp = 0; xp < 3 * w; xp += 3) {
            int x, y, z, r, g, b;
            /* ... */
            r = c->rgb2xyz.gamma.in[r >> 4];
            g = c->rgb2xyz.gamma.in[g >> 4];
            b = c->rgb2xyz.gamma.in[b >> 4];
            /* ... */
            x = c->rgb2xyz.mat[0][0] * r +
                c->rgb2xyz.mat[0][1] * g +
                c->rgb2xyz.mat[0][2] * b >> 12;
            y = c->rgb2xyz.mat[1][0] * r +
                c->rgb2xyz.mat[1][1] * g +
                c->rgb2xyz.mat[1][2] * b >> 12;
            z = c->rgb2xyz.mat[2][0] * r +
                c->rgb2xyz.mat[2][1] * g +
                c->rgb2xyz.mat[2][2] * b >> 12;
            /* ... */
                AV_WB16(dst16 + xp + 0, c->rgb2xyz.gamma.out[x] << 4);
                AV_WB16(dst16 + xp + 1, c->rgb2xyz.gamma.out[y] << 4);
                AV_WB16(dst16 + xp + 2, c->rgb2xyz.gamma.out[z] << 4);
            /* ... */
                AV_WL16(dst16 + xp + 0, c->rgb2xyz.gamma.out[x] << 4);
                AV_WL16(dst16 + xp + 1, c->rgb2xyz.gamma.out[y] << 4);
                AV_WL16(dst16 + xp + 2, c->rgb2xyz.gamma.out[z] << 4);
            /* ... */
        }
        /* ... */
    }
}
void ff_update_palette(SwsInternal *c, const uint32_t *pal)
{
    for (int i = 0; i < 256; i++) {
        int r, g, b, y, u, v, a = 0xff;
        /* ... case AV_PIX_FMT_PAL8: */
            a = (p >> 24) & 0xFF;
            r = (p >> 16) & 0xFF;
            /* ... */
        /* ... case AV_PIX_FMT_RGB8: */
            g = ((i >> 2) & 7) * 36;
            /* ... */
        /* ... case AV_PIX_FMT_BGR8: */
            g = ((i >> 3) & 7) * 36;
            /* ... */
        /* ... case AV_PIX_FMT_RGB4_BYTE: */
            r = (i >> 3) * 255;
            g = ((i >> 1) & 3) * 85;
            /* ... */
        /* ... case AV_PIX_FMT_BGR4_BYTE: */
            b = (i >> 3) * 255;
            g = ((i >> 1) & 3) * 85;
            /* ... */

#define RGB2YUV_SHIFT 15
#define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
        /* ... */
        c->pal_yuv[i] = y + (u << 8) + (v << 16) + ((unsigned)a << 24);

        switch (c->opts.dst_format) {
        /* ... */
            c->pal_rgb[i] = r + (g << 8) + (b << 16) + ((unsigned)a << 24);
        /* ... */
            c->pal_rgb[i] = a + (r << 8) + (g << 16) + ((unsigned)b << 24);
        /* ... */
            c->pal_rgb[i] = a + (b << 8) + (g << 16) + ((unsigned)r << 24);
        /* ... */
            c->pal_rgb[i] = a + (r << 8) + (b << 16) + ((unsigned)g << 24);
        /* ... */
            c->pal_rgb[i] = g + (b << 8) + (r << 16) + ((unsigned)a << 24);
        /* ... */
            c->pal_rgb[i] = b + (g << 8) + (r << 16) + ((unsigned)a << 24);
        }
    }
}
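The macros are the Q15 BT.601 limited-range RGB-to-YUV matrix (luma scaled by 219/255, chroma by 224/255). A hedged sketch of converting one palette entry with them, matching the pal_yuv packing above; the rounding constants (33 and 257 half-steps, i.e. the +16 and +128 level shifts plus 0.5) follow the usual swscale pattern and are assumptions here, as is av_clip_uint8() from libavutil:

#include <libavutil/common.h>

/* Sketch: one 0xAAVVUUYY palette entry from 8-bit R,G,B,A using the
 * RY/GY/BY... macros defined above. */
static uint32_t rgb_to_yuv_entry(int r, int g, int b, int a)
{
    int y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    int u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    int v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
    return y + (u << 8) + (v << 16) + ((unsigned)a << 24);
}

Note that 33 << (RGB2YUV_SHIFT - 1) equals 16.5 in Q15, folding the luma black level and the 0.5 rounding into a single constant.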
static int scale_internal(SwsContext *sws,
                          const uint8_t *const srcSlice[], const int srcStride[],
                          int srcSliceY, int srcSliceH,
                          uint8_t *const dstSlice[], const int dstStride[],
                          int dstSliceY, int dstSliceH);

static int scale_gamma(SwsInternal *c,
                       const uint8_t *const srcSlice[], const int srcStride[],
                       int srcSliceY, int srcSliceH,
                       uint8_t *const dstSlice[], const int dstStride[],
                       int dstSliceY, int dstSliceH)
{
    int ret = scale_internal(c->cascaded_context[0],
                             srcSlice, srcStride, srcSliceY, srcSliceH,
                             c->cascaded_tmp[0], c->cascaded_tmpStride[0],
                             0, c->opts.src_h);
    /* ... */
    if (c->cascaded_context[2])
        ret = scale_internal(c->cascaded_context[1],
                             (const uint8_t * const *)c->cascaded_tmp[0],
                             c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
                             c->cascaded_tmp[1], c->cascaded_tmpStride[1],
                             0, c->opts.dst_h);
    else
        ret = scale_internal(c->cascaded_context[1],
                             (const uint8_t * const *)c->cascaded_tmp[0],
                             c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
                             dstSlice, dstStride, dstSliceY, dstSliceH);
    /* ... */
    if (c->cascaded_context[2]) {
        /* ... */
        ret = scale_internal(c->cascaded_context[2],
                             (const uint8_t * const *)c->cascaded_tmp[1],
                             c->cascaded_tmpStride[1], dstY1 - ret, dstY1,
                             dstSlice, dstStride, dstSliceY, dstSliceH);
    }
    return ret;
}
static int scale_cascaded(SwsInternal *c,
                          const uint8_t *const srcSlice[], const int srcStride[],
                          int srcSliceY, int srcSliceH,
                          uint8_t *const dstSlice[], const int dstStride[],
                          int dstSliceY, int dstSliceH)
{
    const int dstH0 = c->cascaded_context[0]->dst_h;
    int ret = scale_internal(c->cascaded_context[0],
                             srcSlice, srcStride, srcSliceY, srcSliceH,
                             c->cascaded_tmp[0], c->cascaded_tmpStride[0],
                             /* ... */);
    /* ... */
    ret = scale_internal(c->cascaded_context[1],
                         (const uint8_t * const * ) c->cascaded_tmp[0],
                         c->cascaded_tmpStride[0],
                         0, dstH0, dstSlice, dstStride, dstSliceY, dstSliceH);
    /* ... */
    return ret;
}
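The same two-stage idea expressed with the public API (an illustration only: the real scale_cascaded() above works on SwsInternal and honours output slicing). Stage 0 scales into a caller-provided temporary sized for its full output, stage 1 consumes that temporary as its input:

#include <libswscale/swscale.h>

/* Hypothetical standalone cascade: ctx0 scales src into tmp (full
 * height tmp_h), ctx1 scales tmp into dst. All buffers and both
 * contexts are set up by the caller. */
static int cascade2(SwsContext *ctx0, SwsContext *ctx1,
                    const uint8_t *const src[], const int srcStride[],
                    int srcSliceY, int srcSliceH,
                    uint8_t *const tmp[], const int tmpStride[], int tmp_h,
                    uint8_t *const dst[], const int dstStride[])
{
    int ret = sws_scale(ctx0, src, srcStride, srcSliceY, srcSliceH,
                        tmp, tmpStride);
    if (ret < 0)
        return ret;
    return sws_scale(ctx1, (const uint8_t *const *)tmp, tmpStride,
                     0, tmp_h, dst, dstStride);
}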
static int scale_internal(SwsContext *sws,
                          const uint8_t *const srcSlice[], const int srcStride[],
                          int srcSliceY, int srcSliceH,
                          uint8_t *const dstSlice[], const int dstStride[],
                          int dstSliceY, int dstSliceH)
{
    /* ... */
    const int scale_dst = dstSliceY > 0 || dstSliceH < sws->dst_h;
    /* ... */
    const uint8_t *src2[4];
    /* ... */
    int srcSliceY_internal = srcSliceY;

    if (!srcStride || !dstStride || !dstSlice || !srcSlice) {
        av_log(c, AV_LOG_ERROR,
               "One of the input parameters to sws_scale() is NULL, "
               "please check the calling code\n");
        /* ... */
    }
    if ((srcSliceY & (macro_height_src - 1)) ||
        ((srcSliceH & (macro_height_src - 1)) && srcSliceY + srcSliceH != sws->src_h) ||
        srcSliceY + srcSliceH > sws->src_h ||
        /* ... */) {
        /* ... */
    }

    if ((dstSliceY & (macro_height_dst - 1)) ||
        ((dstSliceH & (macro_height_dst - 1)) && dstSliceY + dstSliceH != sws->dst_h) ||
        dstSliceY + dstSliceH > sws->dst_h) {
        /* ... */
    }
    /* ... */
        return scale_gamma(c, srcSlice, srcStride, srcSliceY, srcSliceH,
                           dstSlice, dstStride, dstSliceY, dstSliceH);

    if (c->cascaded_context[0] && srcSliceY == 0 &&
        srcSliceH == c->cascaded_context[0]->src_h)
        return scale_cascaded(c, srcSlice, srcStride, srcSliceY, srcSliceH,
                              dstSlice, dstStride, dstSliceY, dstSliceH);

    /* ... */
    for (i = 0; i < 4; i++)
        memset(c->dither_error[i], 0,
               sizeof(c->dither_error[0][0]) * (sws->dst_w + 2));

    /* ... */
    memcpy(src2, srcSlice, sizeof(src2));
    memcpy(dst2, dstSlice, sizeof(dst2));
    memcpy(srcStride2, srcStride, sizeof(srcStride2));
    memcpy(dstStride2, dstStride, sizeof(dstStride2));
    if (srcSliceY != 0 && srcSliceY + srcSliceH != sws->src_h) {
        /* ... */
    }
    /* ... */
        c->sliceDir = (srcSliceY == 0) ? 1 : -1;
    } else if (scale_dst)
        /* ... */

    /* rgb0 input: fill the alpha channel in a scratch copy */
    /* ... FFABS(srcStride[0]) * srcSliceH + 32); */
    if (!c->rgb0_scratch)
        return AVERROR(ENOMEM);
    /* ... */
    base = srcStride[0] < 0 ? c->rgb0_scratch - srcStride[0] * (srcSliceH - 1)
                            : c->rgb0_scratch;
    /* ... */
    for (y = 0; y < srcSliceH; y++) {
        /* ... */
        for (x = c->src0Alpha - 1; x < 4 * sws->src_w; x += 4)
            base[srcStride[0] * y + x] = 0xFF;
    }
    /* XYZ input: convert to RGB48 in a scratch buffer first */
    /* ... FFABS(srcStride[0]) * srcSliceH + 32); */
    if (!c->xyz_scratch)
        return AVERROR(ENOMEM);
    /* ... */
    base = srcStride[0] < 0 ? c->xyz_scratch - srcStride[0] * (srcSliceH - 1)
                            : c->xyz_scratch;
    /* ... */
    c->xyz12Torgb48(c, base, srcStride[0], src2[0], srcStride[0],
                    sws->src_w, srcSliceH);
    if (c->sliceDir != 1) {
        /* slices go from bottom to top => write to the end of the buffers */
        for (i = 0; i < 4; i++) {
            srcStride2[i] *= -1;
            dstStride2[i] *= -1;
        }
        /* ... */
        src2[0] += (srcSliceH - 1) * srcStride[0];
        /* ... */
        src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
        src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
        src2[3] += (srcSliceH - 1) * srcStride[3];
        dst2[0] += (sws->dst_h - 1) * dstStride[0];
        dst2[1] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[1];
        dst2[2] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[2];
        dst2[3] += (sws->dst_h - 1) * dstStride[3];
        /* ... */
        srcSliceY_internal = sws->src_h - srcSliceY - srcSliceH;
    }
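The sign flips above implement bottom-up slice processing: negate every stride and point each plane at its last line, so the same forward-walking code fills the image from the bottom. Isolated for a single plane (hypothetical helper):

#include <stdint.h>

/* Retarget one plane for bottom-up traversal: start at the last line
 * and walk upwards via a negative stride. For subsampled chroma
 * planes, pass the subsampled line count. */
static void flip_plane(const uint8_t **data, int *stride, int lines)
{
    *data  += (lines - 1) * *stride;  /* jump to the last line */
    *stride = -*stride;               /* subsequent lines go upwards */
}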
    if (c->convert_unscaled) {
        int offset  = srcSliceY_internal;
        int slice_h = srcSliceH;
        /* ... */
        for (i = 0; i < 4 && src2[i]; i++) {
            /* ... */
            src2[i] += (dstSliceY >> ((i == 1 || i == 2) ? c->chrSrcVSubSample : 0)) * srcStride2[i];
        }
        /* ... */
        for (i = 0; i < 4 && dst2[i]; i++) {
            /* ... */
            dst2[i] -= (dstSliceY >> ((i == 1 || i == 2) ? c->chrDstVSubSample : 0)) * dstStride2[i];
        }
        /* ... */
        slice_h = dstSliceH;
        /* ... */
    }
    /* ... */
        dst2[0] += dstSliceY * dstStride2[0];
    /* ... */
    ret = ff_swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH,
                     dst2, dstStride2, dstSliceY, dstSliceH);
    /* ... */
        int dstY = c->dstY ? c->dstY : srcSliceY + srcSliceH;
        /* ... */
        dst = dst2[0] + (dstY - ret) * dstStride2[0];
        /* ... */

    if ((srcSliceY_internal + srcSliceH == sws->src_h) || scale_dst)
        c->sliceDir = 0;
    /* ... */
    c->src_ranges.nb_ranges = 0;
    /* ... */
int sws_frame_start(SwsContext *sws, AVFrame *dst, const AVFrame *src)
{
    /* ... */
    int ret, allocated = 0;
    /* ... */
}

int sws_send_slice(SwsContext *sws, unsigned int slice_start,
                   unsigned int slice_height)
{
    /* ... */
}

unsigned int sws_receive_slice_alignment(const SwsContext *sws)
{
    /* ... */
    return c->dst_slice_align;
}

int sws_receive_slice(SwsContext *sws, unsigned int slice_start,
                      unsigned int slice_height)
{
    /* wait until the complete input has been received */
    if (!(c->src_ranges.nb_ranges == 1 &&
          c->src_ranges.ranges[0].start == 0 &&
          c->src_ranges.ranges[0].len == sws->src_h))
        return AVERROR(EAGAIN);
    /* ... */
    if ((slice_start > 0 || slice_height < sws->dst_h) &&
        /* ... */
        av_log(c, AV_LOG_ERROR,
               "Incorrectly aligned output: %u/%u not multiples of %u\n",
               /* ... */);
    /* ... */
    if (c->slicethread) {
        int nb_jobs = c->nb_slice_ctx;
        /* ... */
        c->dst_slice_height = slice_height;
        /* ... */
        for (int i = 0; i < c->nb_slice_ctx; i++) {
            if (c->slice_err[i] < 0) {
                ret = c->slice_err[i];
                /* ... */
            }
        }
        /* ... */
        memset(c->slice_err, 0, c->nb_slice_ctx * sizeof(*c->slice_err));
        /* ... */
    }
    /* ... single-threaded path, per-plane offsets: */
        ptrdiff_t offset = c->frame_dst->linesize[i] *
                           (ptrdiff_t)(slice_start >> c->chrDstVSubSample);
    /* ... */
}
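These entry points form the incremental slice API: start a frame pair, announce input rows with sws_send_slice(), then pull aligned output bands with sws_receive_slice(). A hedged usage sketch built only from the public functions (error handling simplified; a real caller may send the input piecewise as it is decoded):

#include <libavutil/frame.h>
#include <libswscale/swscale.h>

/* Sketch: scale src into dst one aligned output band at a time. */
static int scale_in_slices(SwsContext *sws, AVFrame *dst, const AVFrame *src)
{
    int ret = sws_frame_start(sws, dst, src);
    if (ret < 0)
        return ret;
    ret = sws_send_slice(sws, 0, src->height);  /* whole input at once */
    if (ret >= 0) {
        unsigned align = sws_receive_slice_alignment(sws);
        for (unsigned y = 0; y < (unsigned)dst->height && ret >= 0; y += align) {
            unsigned h = (unsigned)dst->height - y;
            ret = sws_receive_slice(sws, y, h < align ? h : align);
        }
    }
    sws_frame_end(sws);
    return ret < 0 ? ret : 0;
}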
static void get_frame_pointers(const AVFrame *frame, uint8_t *data[4],
                               int linesize[4], int field)
{
    for (int i = 0; i < 4; i++) {
        data[i]     = frame->data[i];
        linesize[i] = frame->linesize[i];
    }
    /* ... */
    for (int i = 0; i < 4; i++) {
        /* ... */
    }
    /* ... */
    for (int i = 0; i < 4; i++)
        /* ... */
}
/* Frame-based entry points (fragments from frame_ref() and the
 * sws_scale_frame() internals): */
    memcpy(dst->data, src->data, sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
    /* ... */
    if (/* ... */ src->buf[0] && !dst->buf[0] && !dst->data[0])
        /* ... */
    if (!dst->data[0]) {
        /* ... */
    }
    /* ... */
    uint8_t *dst_data[4], *src_data[4];
    int dst_linesize[4], src_linesize[4];
    /* ... */
    /* ... (const uint8_t **) src_data, src_linesize); */
#define VALIDATE(field, min, max)                                       \
    if (ctx->field < min || ctx->field > max) {                         \
        av_log(ctx, AV_LOG_ERROR, "'%s' (%d) out of range [%d, %d]\n",  \
               #field, (int) ctx->field, min, max);                     \
        return AVERROR(EINVAL);                                         \
    }

/* sws_frame_setup() error paths: */
    const char *err_msg;
    /* ... */
        err_msg = "Cannot convert interlaced to progressive frames or vice versa.\n";
    /* ... */
    if ((!src_ok || !dst_ok) && !ff_props_equal(&src_fmt, &dst_fmt)) {
        err_msg = src_ok ? "Unsupported output" : "Unsupported input";
        /* ... */
    }
    /* ... */
        err_msg = "Failed initializing scaling graph";
    /* ... */
        err_msg = "Incomplete scaling graph";
    /* ... */
        /* ... " fmt:%s csp:%s prim:%s trc:%s\n", ... */
int attribute_align_arg sws_scale(SwsContext *sws,
                                  const uint8_t *const srcSlice[],
                                  const int srcStride[], int srcSliceY,
                                  int srcSliceH, uint8_t *const dst[],
                                  const int dstStride[])
{
    /* ... */
    if (c->nb_slice_ctx) {
        sws = c->slice_ctx[0];
        /* ... */
    }
    /* ... */
}

void ff_sws_slice_worker(void *priv, int jobnr, int threadnr,
                         int nb_jobs, int nb_threads)
{
    /* ... c->dst_slice_align); */
    /* ... */
        const int vshift = (i == 1 || i == 2) ? c->chrDstVSubSample : 0;
    /* ... */
}
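For reference, the classic one-shot calling convention that this wrapper serves; the formats and flags below are arbitrary example choices:

#include <libswscale/swscale.h>

/* Sketch: convert a full YUV420P frame to RGB24 in a single call.
 * Returns the number of output lines written, or a negative error. */
static int convert_frame(const uint8_t *const in_data[4], const int in_stride[4],
                         int in_w, int in_h,
                         uint8_t *const out_data[4], const int out_stride[4],
                         int out_w, int out_h)
{
    SwsContext *ctx = sws_getContext(in_w, in_h, AV_PIX_FMT_YUV420P,
                                     out_w, out_h, AV_PIX_FMT_RGB24,
                                     SWS_BILINEAR, NULL, NULL, NULL);
    int ret;
    if (!ctx)
        return -1;
    /* one call covering the whole frame: srcSliceY = 0, srcSliceH = in_h */
    ret = sws_scale(ctx, in_data, in_stride, 0, in_h, out_data, out_stride);
    sws_freeContext(ctx);
    return ret;
}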