{ 1, 3, 1, 3, 1, 3, 1, 3, },
{ 2, 0, 2, 0, 2, 0, 2, 0, },
{ 1, 3, 1, 3, 1, 3, 1, 3, },

{ 6, 2, 6, 2, 6, 2, 6, 2, },
{ 0, 4, 0, 4, 0, 4, 0, 4, },
{ 6, 2, 6, 2, 6, 2, 6, 2, },

{ 8, 4, 11, 7, 8, 4, 11, 7, },
{ 2, 14, 1, 13, 2, 14, 1, 13, },
{ 10, 6, 9, 5, 10, 6, 9, 5, },
{ 0, 12, 3, 15, 0, 12, 3, 15, },
{ 8, 4, 11, 7, 8, 4, 11, 7, },

{ 17, 9, 23, 15, 16, 8, 22, 14, },
{ 5, 29, 3, 27, 4, 28, 2, 26, },
{ 21, 13, 19, 11, 20, 12, 18, 10, },
{ 0, 24, 6, 30, 1, 25, 7, 31, },
{ 16, 8, 22, 14, 17, 9, 23, 15, },
{ 4, 28, 2, 26, 5, 29, 3, 27, },
{ 20, 12, 18, 10, 21, 13, 19, 11, },
{ 1, 25, 7, 31, 0, 24, 6, 30, },
{ 17, 9, 23, 15, 16, 8, 22, 14, },

{ 0, 55, 14, 68, 3, 58, 17, 72, },
{ 37, 18, 50, 32, 40, 22, 54, 35, },
{ 9, 64, 5, 59, 13, 67, 8, 63, },
{ 46, 27, 41, 23, 49, 31, 44, 26, },
{ 2, 57, 16, 71, 1, 56, 15, 70, },
{ 39, 21, 52, 34, 38, 19, 51, 33, },
{ 11, 66, 7, 62, 10, 65, 6, 60, },
{ 48, 30, 43, 25, 47, 29, 42, 24, },
{ 0, 55, 14, 68, 3, 58, 17, 72, },

{117, 62, 158, 103, 113, 58, 155, 100, },
{ 34, 199, 21, 186, 31, 196, 17, 182, },
{144, 89, 131, 76, 141, 86, 127, 72, },
{ 0, 165, 41, 206, 10, 175, 52, 217, },
{110, 55, 151, 96, 120, 65, 162, 107, },
{ 28, 193, 14, 179, 38, 203, 24, 189, },
{138, 83, 124, 69, 148, 93, 134, 79, },
{ 7, 172, 48, 213, 3, 168, 45, 210, },
{117, 62, 158, 103, 113, 58, 155, 100, },

{ 0, 143, 18, 200, 2, 156, 25, 215, },
{ 78, 28, 125, 64, 89, 36, 138, 74, },
{ 10, 180, 3, 161, 16, 195, 8, 175, },
{109, 51, 93, 38, 121, 60, 105, 47, },
{ 1, 152, 23, 210, 0, 147, 20, 205, },
{ 85, 33, 134, 71, 81, 30, 130, 67, },
{ 14, 190, 6, 171, 12, 185, 5, 166, },
{117, 57, 101, 44, 113, 54, 97, 41, },
{ 0, 143, 18, 200, 2, 156, 25, 215, },

{ 0, 124, 8, 193, 0, 140, 12, 213, },
{ 55, 14, 104, 42, 66, 19, 119, 52, },
{ 3, 168, 1, 145, 6, 187, 3, 162, },
{ 86, 31, 70, 21, 99, 39, 82, 28, },
{ 0, 134, 11, 206, 0, 129, 9, 200, },
{ 62, 17, 114, 48, 58, 16, 109, 45, },
{ 5, 181, 2, 157, 4, 175, 1, 151, },
{ 95, 36, 78, 26, 90, 34, 74, 24, },
{ 0, 124, 8, 193, 0, 140, 12, 213, },

{ 0, 107, 3, 187, 0, 125, 6, 212, },
{ 39, 7, 86, 28, 49, 11, 102, 36, },
{ 1, 158, 0, 131, 3, 180, 1, 151, },
{ 68, 19, 52, 12, 81, 25, 64, 17, },
{ 0, 119, 5, 203, 0, 113, 4, 195, },
{ 45, 9, 96, 33, 42, 8, 91, 30, },
{ 2, 172, 1, 144, 2, 165, 0, 137, },
{ 77, 23, 60, 15, 72, 21, 56, 14, },
{ 0, 107, 3, 187, 0, 125, 6, 212, },
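/*
 * The rows above appear to be the ordered-dither matrices used when
 * reducing bit depth (2x2, 4x4 and 8x8 patterns at several amplitudes).
 * Each row holds 8 byte-sized offsets that are added to a pixel value
 * before the final right shift, so rounding error is spread spatially
 * instead of showing up as banding.
 */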
#define output_pixel(pos, val, bias, signedness) \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \

                            int big_endian, int output_bits)

    for (i = 0; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));

                            const int32_t **src, uint16_t *dest, int dstW,
                            int big_endian, int output_bits)

    for (i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1);

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * (unsigned)filter[j];
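/*
 * The 16-bit planar writers above start "val" at half of one output step
 * (1 << (shift - 1)) so that the later ">> shift" rounds to nearest rather
 * than truncating. The filter products are accumulated as (unsigned) so
 * any wraparound of the intermediate sum stays well defined in C. The
 * result is clipped to a signed or unsigned 16-bit range, offset by the
 * bias argument, and stored with AV_WB16 or AV_WL16 depending on
 * big_endian.
 */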
#define output_pixel(pos, val) \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \

                            int big_endian, int output_bits)

    int shift = 15 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));

                            const int16_t **src, uint16_t *dest, int dstW,
                            int big_endian, int output_bits)

    int shift = 11 + 16 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1);

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];
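/*
 * 9-14 bit planar writers. In the unfiltered (plane1) path the input
 * samples carry 15 significant bits, so shift = 15 - output_bits simply
 * drops the excess precision (5 bits for 10-bit output). In the filtered
 * (planeX) path the accumulated sum also carries the scale of the vertical
 * filter coefficients, which appear to be 12-bit fixed point (summing to
 * 4096), hence shift = 11 + 16 - output_bits = 27 - output_bits
 * (17 for 10-bit output). Both paths pre-add 1 << (shift - 1) as a
 * rounding term.
 */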
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
                              uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset) \
    yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                              const int16_t **src, uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset) \
    yuv2planeX_ ## template_size ## _c_template(filter, \
                         filterSize, (const typeX_t **) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \

yuv2NBPS(10, BE, 1, 10, int16_t)
yuv2NBPS(10, LE, 0, 10, int16_t)
yuv2NBPS(12, BE, 1, 10, int16_t)
yuv2NBPS(12, LE, 0, 10, int16_t)
yuv2NBPS(14, BE, 1, 10, int16_t)
yuv2NBPS(14, LE, 0, 10, int16_t)
yuv2NBPS(16, BE, 1, 16, int32_t)
yuv2NBPS(16, LE, 0, 16, int32_t)
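/*
 * yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) stamps out thin
 * per-depth, per-endianness wrappers around the two templates above.
 * A sketch of one expansion (illustrative, not a literal copy):
 *
 *   // yuv2NBPS(10, LE, 0, 10, int16_t) roughly becomes:
 *   static void yuv2plane1_10LE_c(const int16_t *src, uint8_t *dest,
 *                                 int dstW, const uint8_t *dither, int offset)
 *   {
 *       yuv2plane1_10_c_template((const int16_t *) src,
 *                                (uint16_t *) dest, dstW, 0, 10);
 *   }
 *
 * The 16-bit instances use the int32_t-based templates because their
 * filtered intermediates do not fit in 16 bits.
 */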
static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
                           const int16_t **src, uint8_t *dest, int dstW,

    for (i = 0; i < dstW; i++) {
        int val = dither[(i + offset) & 7] << 12;

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        dest[i] = av_clip_uint8(val >> 19);

    for (i = 0; i < dstW; i++) {
        int val = (src[i] + dither[(i + offset) & 7]) >> 7;
        dest[i] = av_clip_uint8(val);
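/*
 * 8-bit planar writers. The filtered loop above sums
 * (15-bit sample) * (12-bit coefficient) products, so ">> 19" returns to
 * 8 bits; the per-column dither value is pre-shifted by 12 so it sits just
 * below one output step and acts as an ordered-dither rounding offset.
 * The unfiltered loop works directly on 15-bit samples:
 * (src + dither) >> 7.
 */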
                          const int16_t **chrUSrc, const int16_t **chrVSrc,

        for (i = 0; i < chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;

            for (j = 0; j < chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];

            dest[2 * i]     = av_clip_uint8(u >> 19);
            dest[2 * i + 1] = av_clip_uint8(v >> 19);

        for (i = 0; i < chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;

            for (j = 0; j < chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];

            dest[2 * i]     = av_clip_uint8(v >> 19);
            dest[2 * i + 1] = av_clip_uint8(u >> 19);
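/*
 * Interleaved-chroma (NV12/NV21-style) writers: the two loops above are
 * identical except for the store order, U then V versus V then U. The
 * dither index for V is offset by 3 so the two components do not share
 * the same dither phase.
 */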
#define accumulate_bit(acc, val) \
    acc |= (val) >= (128 + 110)

#define output_pixel(pos, acc) \
    if (target == AV_PIX_FMT_MONOBLACK) { \

                      const int16_t **lumSrc, int lumFilterSize,
                      const int16_t *chrFilter, const int16_t **chrUSrc,
                      const int16_t **chrVSrc, int chrFilterSize,
                      const int16_t **alpSrc, uint8_t *dest, int dstW,

    for (i = 0; i < dstW; i += 2) {

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i]     * lumFilter[j];
            Y2 += lumSrc[j][i + 1] * lumFilter[j];

        if ((Y1 | Y2) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);

            acc = 2 * acc + (Y1 >= 128);

            acc = 2 * acc + (err >= 128);

                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf[2], uint8_t *dest, int dstW,
                      int yalpha, int uvalpha, int y,

    const int16_t *buf0 = buf[0], *buf1 = buf[1];
    int yalpha1 = 4096 - yalpha;

        for (i = 0; i < dstW; i += 2) {
            Y   = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            acc = 2 * acc + (Y >= 128);
            err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            acc = 2 * acc + (err >= 128);

        for (i = 0; i < dstW; i += 8) {
            Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
            Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
            Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
            Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
            Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
            Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
            Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
            Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;

                      const int16_t *ubuf[2], const int16_t *vbuf[2],
                      const int16_t *abuf0, uint8_t *dest, int dstW,

        for (i = 0; i < dstW; i += 2) {
            Y   = ((buf0[i + 0] + 64) >> 7);
            acc = 2 * acc + (Y >= 128);
            err = ((buf0[i + 1] + 64) >> 7);
            acc = 2 * acc + (err >= 128);

        for (i = 0; i < dstW; i += 8) {

#undef accumulate_bit
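/*
 * 1-bit (monowhite/monoblack) writers. Two dithering strategies show up
 * above: an error-diffusion path, where each output bit is decided by a
 * ">= 128" threshold and acc = 2*acc + bit packs eight pixels into one
 * output byte, and an ordered-dither path built on accumulate_bit(), whose
 * threshold of 128 + 110 looks like mid-grey plus half the amplitude of
 * the 8x8/220 dither matrix added to the luma first (an assumption based
 * on the tables above). The _2_ variants first blend two input lines,
 * (buf0 * yalpha1 + buf1 * yalpha) >> 19 with yalpha1 = 4096 - yalpha:
 * a 12-bit blend factor folded into the 15-bit-to-8-bit conversion
 * (12 + 15 - 19 = 8).
 */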
#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \

static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \

static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
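/*
 * YUV2PACKEDWRAPPER() generates the three entry points the scaler expects
 * for each packed format: _X_c (full vertical filter with an arbitrary
 * number of taps), _2_c (blend of exactly two lines via yalpha/uvalpha)
 * and _1_c (a single input line). Each wrapper only forwards to the shared
 * *_c_template with the pixel format passed as a compile-time constant, so
 * the compiler can fold the per-format branches inside the template.
 */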
#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == AV_PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 2] = Y2; \
        dest[pos + 1] = Y1; \
        dest[pos + 3] = Y2; \

                     const int16_t **lumSrc, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrc,
                     const int16_t **chrVSrc, int chrFilterSize,
                     const int16_t **alpSrc, uint8_t *dest, int dstW,

    for (i = 0; i < ((dstW + 1) >> 1); i++) {

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);

                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,

    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int yalpha1  = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1 + buf1[i * 2]     * yalpha) >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
        int U  = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
        int V  = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;

        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);

                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,

    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2]     + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[i]        + 64) >> 7;
            int V  = (vbuf0[i]        + 64) >> 7;

            if ((Y1 | Y2 | U | V) & 0x100) {
                Y1 = av_clip_uint8(Y1);
                Y2 = av_clip_uint8(Y2);
                U  = av_clip_uint8(U);
                V  = av_clip_uint8(V);

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);

        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2]     + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[i] + ubuf1[i] + 128) >> 8;
            int V  = (vbuf0[i] + vbuf1[i] + 128) >> 8;

            if ((Y1 | Y2 | U | V) & 0x100) {
                Y1 = av_clip_uint8(Y1);
                Y2 = av_clip_uint8(Y2);
                U  = av_clip_uint8(U);
                V  = av_clip_uint8(V);

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U  = av_clip_uint8(U);
            V  = av_clip_uint8(V);
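/*
 * Packed 4:2:2 (YUYV/UYVY) writers. Each iteration emits two luma samples
 * and one U/V pair, so the loops run (dstW + 1) / 2 times and the
 * output_pixels() macro picks the byte order for the target format. The
 * "(Y1 | Y2 | U | V) & 0x100" test is a cheap way to notice that at least
 * one value left the 0..255 range before paying for four clips. In the
 * single-line variant, uvalpha < 2048 means the first stored chroma line
 * is used directly ((x + 64) >> 7 rounds 15-bit to 8-bit); otherwise the
 * two stored chroma lines are averaged with (a + b + 128) >> 8.
 */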
#define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? R : B)
#define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? B : R)
#define output_pixel(pos, val) \
    if (isBE(target)) { \

                       const int32_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int32_t **chrUSrc,
                       const int32_t **chrVSrc, int chrFilterSize,
                       const int32_t **alpSrc, uint16_t *dest, int dstW,

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = -0x40000000;
        int Y2 = -0x40000000;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * (unsigned)lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];

        output_pixel(&dest[1], av_clip_uintp2(G + Y1, 30) >> 14);
        output_pixel(&dest[4], av_clip_uintp2(G + Y2, 30) >> 14);

                       const int32_t *abuf[2], uint16_t *dest, int dstW,
                       int yalpha, int uvalpha, int y,

    const int32_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int yalpha1  = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1 + buf1[i * 2]     * yalpha) >> 14;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
        int U  = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14;
        int V  = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14;

        output_pixel(&dest[1], av_clip_uintp2(G + Y1, 30) >> 14);
        output_pixel(&dest[4], av_clip_uintp2(G + Y2, 30) >> 14);

                       const int32_t *abuf0, uint16_t *dest, int dstW,

    const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2]    ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U  = (ubuf0[i] + (-128 << 11)) >> 2;
            int V  = (vbuf0[i] + (-128 << 11)) >> 2;

            output_pixel(&dest[1], av_clip_uintp2(G + Y1, 30) >> 14);
            output_pixel(&dest[4], av_clip_uintp2(G + Y2, 30) >> 14);

        const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2]    ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U  = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3;
            int V  = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3;

            output_pixel(&dest[1], av_clip_uintp2(G + Y1, 30) >> 14);
            output_pixel(&dest[4], av_clip_uintp2(G + Y2, 30) >> 14);
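/*
 * 48-bit RGB writers (16 bits per component). The luma accumulators start
 * at a large negative bias (-0x40000000) and the chroma terms subtract
 * 128 << 23 (or 128 << 11 / 128 << 12 in the single-line paths), removing
 * the YUV offsets at the working fixed-point scale; the luma bias is
 * presumably cancelled by a matching correction after the later shifts.
 * Products are cast to (unsigned) so intermediate wraparound stays well
 * defined. The final av_clip_uintp2(x, 30) >> 14 clamps to 30 bits and
 * drops 14 fractional bits, leaving a 16-bit component for output_pixel(),
 * which byte-swaps via isBE(target).
 */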
#define YUV2PACKED16WRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **_lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **_chrUSrc, \
                                const int16_t **_chrVSrc, int chrFilterSize, \
                                const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
    const int32_t **lumSrc  = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc  = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \

static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                                const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                                const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
    const int32_t **buf  = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \

static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                                const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                                const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                                int uvalpha, int y) \
    const int32_t *buf0  = (const int32_t *)  _buf0, \
                 **ubuf  = (const int32_t **) _ubuf, \
                 **vbuf  = (const int32_t **) _vbuf, \
                  *abuf0 = (const int32_t *)  _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt); \
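/*
 * YUV2PACKED16WRAPPER() mirrors YUV2PACKEDWRAPPER() for the deep-colour
 * paths: the generic scaler passes every buffer around as int16_t*, but
 * the high bit depth pipeline really stores int32_t intermediates, so
 * these wrappers mainly cast the pointers back to int32_t before calling
 * the templates.
 */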
                              unsigned A1, unsigned A2,
                              const void *_r, const void *_g, const void *_b, int y,

        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];

#define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
#define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)

        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] =   g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] =   g[Y2];
        dest[i * 6 + 5] = b_r[Y2];

        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];

        int dr1, dg1, db1, dr2, dg2, db2;

            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1 =       d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2 =       d64[(i * 2 + 1) & 7];

            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1 =        d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2 =        d64[(i * 2 + 1) & 7];

            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                      ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);

            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
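/*
 * yuv2rgb_write() assembles pixels by table lookup instead of per-pixel
 * multiplies: r, g and b point at per-format tables indexed by the 8-bit
 * luma (presumably the c->table_rV/gU/gV/bU tables built elsewhere from
 * the colourspace coefficients), each entry already shifted into its
 * packed position, so one addition per component builds the pixel. 32-bit
 * targets optionally add an alpha byte via "A << sh"; 24-bit targets store
 * three bytes per pixel, with r_b/b_r selecting RGB24 versus BGR24 order;
 * 16/15/12-bit targets add small per-pixel dither offsets (d32/d64) to the
 * table index; and the 8/4-bit targets use the coarser d64/d128 offsets,
 * packing two 4-bit pixels into one byte where needed.
 */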
                     const int16_t **lumSrc, int lumFilterSize,
                     const int16_t *chrFilter, const int16_t **chrUSrc,
                     const int16_t **chrVSrc, int chrFilterSize,
                     const int16_t **alpSrc, uint8_t *dest, int dstW,

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        const void *r, *g, *b;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2]     * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

            for (j = 0; j < lumFilterSize; j++) {
                A1 += alpSrc[j][i * 2    ] * lumFilter[j];
                A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];

            if ((A1 | A2) & 0x100) {
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);

        yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);

                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf[2], uint8_t *dest, int dstW,
                     int yalpha, int uvalpha, int y,

    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1  = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2]     * yalpha1 + buf1[i * 2]     * yalpha) >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
        int U  = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
        int V  = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;

            A1 = (abuf0[i * 2    ] * yalpha1 + abuf1[i * 2    ] * yalpha) >> 19;
            A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
            A1 = av_clip_uint8(A1);
            A2 = av_clip_uint8(A2);

        yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);

                     const int16_t *ubuf[2], const int16_t *vbuf[2],
                     const int16_t *abuf0, uint8_t *dest, int dstW,

    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2    ] + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[i]        + 64) >> 7;
            int V  = (vbuf0[i]        + 64) >> 7;

                A1 = abuf0[i * 2    ] * 255 + 16384 >> 15;
                A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);

        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2    ] + 64) >> 7;
            int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
            int U  = (ubuf0[i] + ubuf1[i] + 128) >> 8;
            int V  = (vbuf0[i] + vbuf1[i] + 128) >> 8;

                A1 = (abuf0[i * 2    ] + 64) >> 7;
                A2 = (abuf0[i * 2 + 1] + 64) >> 7;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \

#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \

static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
#if CONFIG_SWSCALE_ALPHA

                       const int16_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int16_t **chrUSrc,
                       const int16_t **chrVSrc, int chrFilterSize,
                       const int16_t **alpSrc, uint8_t *dest,

    for (i = 0; i < dstW; i++) {
        int U = (1 << 9) - (128 << 19);
        int V = (1 << 9) - (128 << 19);

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * lumFilter[j];

                A = av_clip_uint8(A);

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;

        R = Y + V * c->yuv2rgb_v2r_coeff;
        G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = Y + U * c->yuv2rgb_u2b_coeff;

        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);

            dest[0] = hasAlpha ? A : 255;

            dest[3] = hasAlpha ? A : 255;

            dest[0] = hasAlpha ? A : 255;

        case AV_PIX_FMT_BGR24:

            dest[3] = hasAlpha ? A : 255;

        case AV_PIX_FMT_RGB4_BYTE:
        case AV_PIX_FMT_RGB8:

                R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2]) >> 4;
                G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2]) >> 4;
                B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2]) >> 4;
                c->dither_error[0][i] = err[0];
                c->dither_error[1][i] = err[1];
                c->dither_error[2][i] = err[2];
                r = R >> (isrgb8 ? 5 : 7);
                g = G >> (isrgb8 ? 5 : 6);
                b = B >> (isrgb8 ? 6 : 7);
                r = av_clip(r, 0, isrgb8 ? 7 : 1);
                g = av_clip(g, 0, isrgb8 ? 7 : 3);
                b = av_clip(b, 0, isrgb8 ? 3 : 1);
                err[0] = R - r * (isrgb8 ? 36 : 255);
                err[1] = G - g * (isrgb8 ? 36 :  85);
                err[2] = B - b * (isrgb8 ? 85 : 255);

                    dest[0] = r + 2*g + 8*b;
                } else if (target == AV_PIX_FMT_RGB4_BYTE) {
                    dest[0] = b + 2*g + 8*r;

                    dest[0] = r + 8*g + 64*b;
                } else if (target == AV_PIX_FMT_RGB8) {
                    dest[0] = b + 4*g + 32*r;

                c->dither_error[0][i] = err[0];
                c->dither_error[1][i] = err[1];
                c->dither_error[2][i] = err[2];
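/*
 * Full-chroma RGB path with error diffusion for the very low depth targets
 * (RGB4/RGB8 and friends). Each component picks up a weighted sum of
 * neighbouring quantization errors,
 * (7*left + 1*above-left + 5*above + 3*above-right) >> 4, which are the
 * classic Floyd-Steinberg weights with c->dither_error[] holding the
 * previous line. The component is then cut down to 1-3 bits (the isrgb8
 * flag selects between the RGB8 and RGB4/BGR4 layouts), the new error is
 * the difference against the reconstruction level (a step of 36, 85 or 255
 * depending on how many bits the component kept), and the resulting
 * 3:3:2- or 1:2:1-style fields are packed into a single output byte.
 */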
#if CONFIG_SWSCALE_ALPHA

                       const int16_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int16_t **chrUSrc,
                       const int16_t **chrVSrc, int chrFilterSize,
                       const int16_t **alpSrc, uint8_t **dest,

    uint16_t **dest16 = (uint16_t **) dest;

    for (i = 0; i < dstW; i++) {
        int U = (1 << 9) - (128 << 19);
        int V = (1 << 9) - (128 << 19);

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

                A = av_clip_uint8(A);

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;

        R = Y + V * c->yuv2rgb_v2r_coeff;
        G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = Y + U * c->yuv2rgb_u2b_coeff;

        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);

            dest16[0][i] = G >> SH;
            dest16[1][i] = B >> SH;
            dest16[2][i] = R >> SH;

            dest[0][i] = G >> 22;
            dest[1][i] = B >> 22;
            dest[2][i] = R >> 22;

    if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
        for (i = 0; i < dstW; i++) {
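/*
 * Planar RGB (GBR-ordered) writer: the three planes are written G, B, R
 * into dest[0..2], either as bytes (">> 22" collapses the 30-bit
 * intermediate to 8 bits) or, for the deeper variants, as uint16_t with a
 * format-dependent shift SH. Because those 16-bit stores are native
 * uint16_t writes, the fix-up pass that follows byte-swaps the line
 * whenever the requested endianness differs from the host's
 * ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)).
 */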
        *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
        *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;

        *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c  : yuv2planeX_9LE_c;
        *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c  : yuv2plane1_9LE_c;

        *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
        *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;

        *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
        *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;

        *yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
        *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;

        *yuv2planeX = yuv2planeX_8_c;

        switch (dstFormat) {
        case AV_PIX_FMT_RGBA:
            *yuv2packedX = yuv2rgba32_full_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packedX = yuv2rgba32_full_X_c;
                *yuv2packedX = yuv2rgbx32_full_X_c;

        case AV_PIX_FMT_ARGB:
            *yuv2packedX = yuv2argb32_full_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packedX = yuv2argb32_full_X_c;
                *yuv2packedX = yuv2xrgb32_full_X_c;

            *yuv2packedX = yuv2bgra32_full_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packedX = yuv2bgra32_full_X_c;
                *yuv2packedX = yuv2bgrx32_full_X_c;

        case AV_PIX_FMT_ABGR:
            *yuv2packedX = yuv2abgr32_full_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packedX = yuv2abgr32_full_X_c;
                *yuv2packedX = yuv2xbgr32_full_X_c;

            *yuv2packedX = yuv2rgb24_full_X_c;
            *yuv2packedX = yuv2bgr24_full_X_c;
            *yuv2packedX = yuv2bgr4_byte_full_X_c;
            *yuv2packedX = yuv2rgb4_byte_full_X_c;
            *yuv2packedX = yuv2bgr8_full_X_c;
            *yuv2packedX = yuv2rgb8_full_X_c;

        if (!*yuv2packedX && !*yuv2anyX)

        switch (dstFormat) {
            *yuv2packed1 = yuv2rgb48le_1_c;
            *yuv2packed2 = yuv2rgb48le_2_c;
            *yuv2packedX = yuv2rgb48le_X_c;

            *yuv2packed1 = yuv2rgb48be_1_c;
            *yuv2packed2 = yuv2rgb48be_2_c;
            *yuv2packedX = yuv2rgb48be_X_c;

            *yuv2packed1 = yuv2bgr48le_1_c;
            *yuv2packed2 = yuv2bgr48le_2_c;
            *yuv2packedX = yuv2bgr48le_X_c;

            *yuv2packed1 = yuv2bgr48be_1_c;
            *yuv2packed2 = yuv2bgr48be_2_c;
            *yuv2packedX = yuv2bgr48be_X_c;

        case AV_PIX_FMT_RGB32:
                *yuv2packed1 = yuv2rgb32_1_c;
                *yuv2packed2 = yuv2rgb32_2_c;
                *yuv2packedX = yuv2rgb32_X_c;
#if CONFIG_SWSCALE_ALPHA
                    *yuv2packed1 = yuv2rgba32_1_c;
                    *yuv2packed2 = yuv2rgba32_2_c;
                    *yuv2packedX = yuv2rgba32_X_c;

                    *yuv2packed1 = yuv2rgbx32_1_c;
                    *yuv2packed2 = yuv2rgbx32_2_c;
                    *yuv2packedX = yuv2rgbx32_X_c;

                *yuv2packed1 = yuv2rgb32_1_1_c;
                *yuv2packed2 = yuv2rgb32_1_2_c;
                *yuv2packedX = yuv2rgb32_1_X_c;
#if CONFIG_SWSCALE_ALPHA
                    *yuv2packed1 = yuv2rgba32_1_1_c;
                    *yuv2packed2 = yuv2rgba32_1_2_c;
                    *yuv2packedX = yuv2rgba32_1_X_c;

                    *yuv2packed1 = yuv2rgbx32_1_1_c;
                    *yuv2packed2 = yuv2rgbx32_1_2_c;
                    *yuv2packedX = yuv2rgbx32_1_X_c;

            *yuv2packed1 = yuv2rgb24_1_c;
            *yuv2packed2 = yuv2rgb24_2_c;
            *yuv2packedX = yuv2rgb24_X_c;

            *yuv2packed1 = yuv2bgr24_1_c;
            *yuv2packed2 = yuv2bgr24_2_c;
            *yuv2packedX = yuv2bgr24_X_c;

            *yuv2packed1 = yuv2rgb16_1_c;
            *yuv2packed2 = yuv2rgb16_2_c;
            *yuv2packedX = yuv2rgb16_X_c;

            *yuv2packed1 = yuv2rgb15_1_c;
            *yuv2packed2 = yuv2rgb15_2_c;
            *yuv2packedX = yuv2rgb15_X_c;

            *yuv2packed1 = yuv2rgb12_1_c;
            *yuv2packed2 = yuv2rgb12_2_c;
            *yuv2packedX = yuv2rgb12_X_c;

            *yuv2packed1 = yuv2rgb8_1_c;
            *yuv2packed2 = yuv2rgb8_2_c;
            *yuv2packedX = yuv2rgb8_X_c;

            *yuv2packed1 = yuv2rgb4_1_c;
            *yuv2packed2 = yuv2rgb4_2_c;
            *yuv2packedX = yuv2rgb4_X_c;

            *yuv2packed1 = yuv2rgb4b_1_c;
            *yuv2packed2 = yuv2rgb4b_2_c;
            *yuv2packedX = yuv2rgb4b_X_c;

    switch (dstFormat) {
        *yuv2packed1 = yuv2monowhite_1_c;
        *yuv2packed2 = yuv2monowhite_2_c;
        *yuv2packedX = yuv2monowhite_X_c;

        *yuv2packed1 = yuv2monoblack_1_c;
        *yuv2packed2 = yuv2monoblack_2_c;
        *yuv2packedX = yuv2monoblack_X_c;

        *yuv2packed1 = yuv2yuyv422_1_c;
        *yuv2packed2 = yuv2yuyv422_2_c;
        *yuv2packedX = yuv2yuyv422_X_c;

        *yuv2packed1 = yuv2uyvy422_1_c;
        *yuv2packed2 = yuv2uyvy422_2_c;
        *yuv2packedX = yuv2uyvy422_X_c;
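/*
 * Function-pointer selection: yuv2plane1/yuv2planeX are picked per output
 * depth and endianness (the BE/LE pairs generated by yuv2NBPS above, or
 * the plain 8-bit versions); the *_full_X_c writers are installed when
 * full-chroma-interpolation RGB output is requested; otherwise each packed
 * format gets its yuv2packed1/yuv2packed2/yuv2packedX triple, with the
 * mono and 4:2:2 writers handled by the final switch.
 */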