FFmpeg — libswscale/output.c: vertical scaling and pixel-format output stage
(planar/packed YUV, mono, 16-bit and float plane writers).
1 /*
2  * Copyright (C) 2001-2012 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 #include <stdint.h>
23 #include <stdio.h>
24 #include <string.h>
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/bswap.h"
30 #include "libavutil/cpu.h"
31 #include "libavutil/intreadwrite.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/mem_internal.h"
34 #include "libavutil/pixdesc.h"
35 #include "config.h"
36 #include "rgb2rgb.h"
37 #include "swscale.h"
38 #include "swscale_internal.h"
39 
41 { 1, 3, 1, 3, 1, 3, 1, 3, },
42 { 2, 0, 2, 0, 2, 0, 2, 0, },
43 { 1, 3, 1, 3, 1, 3, 1, 3, },
44 };
45 
47 { 6, 2, 6, 2, 6, 2, 6, 2, },
48 { 0, 4, 0, 4, 0, 4, 0, 4, },
49 { 6, 2, 6, 2, 6, 2, 6, 2, },
50 };
51 
53 { 8, 4, 11, 7, 8, 4, 11, 7, },
54 { 2, 14, 1, 13, 2, 14, 1, 13, },
55 { 10, 6, 9, 5, 10, 6, 9, 5, },
56 { 0, 12, 3, 15, 0, 12, 3, 15, },
57 { 8, 4, 11, 7, 8, 4, 11, 7, },
58 };
59 
61 { 17, 9, 23, 15, 16, 8, 22, 14, },
62 { 5, 29, 3, 27, 4, 28, 2, 26, },
63 { 21, 13, 19, 11, 20, 12, 18, 10, },
64 { 0, 24, 6, 30, 1, 25, 7, 31, },
65 { 16, 8, 22, 14, 17, 9, 23, 15, },
66 { 4, 28, 2, 26, 5, 29, 3, 27, },
67 { 20, 12, 18, 10, 21, 13, 19, 11, },
68 { 1, 25, 7, 31, 0, 24, 6, 30, },
69 { 17, 9, 23, 15, 16, 8, 22, 14, },
70 };
71 
73 { 0, 55, 14, 68, 3, 58, 17, 72, },
74 { 37, 18, 50, 32, 40, 22, 54, 35, },
75 { 9, 64, 5, 59, 13, 67, 8, 63, },
76 { 46, 27, 41, 23, 49, 31, 44, 26, },
77 { 2, 57, 16, 71, 1, 56, 15, 70, },
78 { 39, 21, 52, 34, 38, 19, 51, 33, },
79 { 11, 66, 7, 62, 10, 65, 6, 60, },
80 { 48, 30, 43, 25, 47, 29, 42, 24, },
81 { 0, 55, 14, 68, 3, 58, 17, 72, },
82 };
83 
84 #if 1
86 {117, 62, 158, 103, 113, 58, 155, 100, },
87 { 34, 199, 21, 186, 31, 196, 17, 182, },
88 {144, 89, 131, 76, 141, 86, 127, 72, },
89 { 0, 165, 41, 206, 10, 175, 52, 217, },
90 {110, 55, 151, 96, 120, 65, 162, 107, },
91 { 28, 193, 14, 179, 38, 203, 24, 189, },
92 {138, 83, 124, 69, 148, 93, 134, 79, },
93 { 7, 172, 48, 213, 3, 168, 45, 210, },
94 {117, 62, 158, 103, 113, 58, 155, 100, },
95 };
96 #elif 1
97 // tries to correct a gamma of 1.5
98 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
99 { 0, 143, 18, 200, 2, 156, 25, 215, },
100 { 78, 28, 125, 64, 89, 36, 138, 74, },
101 { 10, 180, 3, 161, 16, 195, 8, 175, },
102 {109, 51, 93, 38, 121, 60, 105, 47, },
103 { 1, 152, 23, 210, 0, 147, 20, 205, },
104 { 85, 33, 134, 71, 81, 30, 130, 67, },
105 { 14, 190, 6, 171, 12, 185, 5, 166, },
106 {117, 57, 101, 44, 113, 54, 97, 41, },
107 { 0, 143, 18, 200, 2, 156, 25, 215, },
108 };
109 #elif 1
110 // tries to correct a gamma of 2.0
111 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
112 { 0, 124, 8, 193, 0, 140, 12, 213, },
113 { 55, 14, 104, 42, 66, 19, 119, 52, },
114 { 3, 168, 1, 145, 6, 187, 3, 162, },
115 { 86, 31, 70, 21, 99, 39, 82, 28, },
116 { 0, 134, 11, 206, 0, 129, 9, 200, },
117 { 62, 17, 114, 48, 58, 16, 109, 45, },
118 { 5, 181, 2, 157, 4, 175, 1, 151, },
119 { 95, 36, 78, 26, 90, 34, 74, 24, },
120 { 0, 124, 8, 193, 0, 140, 12, 213, },
121 };
122 #else
123 // tries to correct a gamma of 2.5
124 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_220)[][8] = {
125 { 0, 107, 3, 187, 0, 125, 6, 212, },
126 { 39, 7, 86, 28, 49, 11, 102, 36, },
127 { 1, 158, 0, 131, 3, 180, 1, 151, },
128 { 68, 19, 52, 12, 81, 25, 64, 17, },
129 { 0, 119, 5, 203, 0, 113, 4, 195, },
130 { 45, 9, 96, 33, 42, 8, 91, 30, },
131 { 2, 172, 1, 144, 2, 165, 0, 137, },
132 { 77, 23, 60, 15, 72, 21, 56, 14, },
133 { 0, 107, 3, 187, 0, 125, 6, 212, },
134 };
135 #endif
136 
/* Store one 16-bit sample: clip (val >> shift) to the signed/unsigned 16-bit
 * range (selected by 'signedness'), add 'bias' and write with the requested
 * byte order.  Relies on 'big_endian' and 'shift' variables in the caller. */
#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
143 
144 static av_always_inline void
145 yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW,
146  int big_endian, int output_bits)
147 {
148  int i;
149  int shift = 3;
150  av_assert0(output_bits == 16);
151 
152  for (i = 0; i < dstW; i++) {
153  int val = src[i] + (1 << (shift - 1));
154  output_pixel(&dest[i], val, 0, uint);
155  }
156 }
157 
/**
 * Vertically filter several 19-bit input lines into one 16-bit output line.
 *
 * @param src 32-bit intermediate lines, one per filter tap
 * @param output_bits must be 16 (asserted)
 */
static av_always_inline void
yuv2planeX_16_c_template(const int16_t *filter, int filterSize,
                         const int32_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i;
    int shift = 15;
    av_assert0(output_bits == 16);

    for (i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1); /* rounding constant */
        int j;

        /* range of val is [0,0x7FFFFFFF], so 31 bits, but with lanczos/spline
         * filters (or anything with negative coeffs, the range can be slightly
         * wider in both directions. To account for this overflow, we subtract
         * a constant so it always fits in the signed range (assuming a
         * reasonable filterSize), and re-add that at the end. */
        val -= 0x40000000;
        for (j = 0; j < filterSize; j++)
            val += src[j][i] * (unsigned)filter[j];

        /* the 0x8000 bias re-adds the subtracted constant after the shift */
        output_pixel(&dest[i], val, 0x8000, int);
    }
}
183 
/**
 * Vertically filter chroma into one interleaved UV line of P016 (16-bit
 * semi-planar).  The int16_t source/filter pointers actually carry 32-bit
 * intermediates, hence the casts below.
 */
static void yuv2p016cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                         const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest8, int chrDstW)
{
    uint16_t *dest = (uint16_t*)dest8;
    const int32_t **uSrc = (const int32_t **)chrUSrc;
    const int32_t **vSrc = (const int32_t **)chrVSrc;
    int shift = 15;
    int big_endian = dstFormat == AV_PIX_FMT_P016BE;
    int i, j;

    for (i = 0; i < chrDstW; i++) {
        int u = 1 << (shift - 1);
        int v = 1 << (shift - 1);

        /* See yuv2planeX_16_c_template for details. */
        u -= 0x40000000;
        v -= 0x40000000;
        for (j = 0; j < chrFilterSize; j++) {
            u += uSrc[j][i] * (unsigned)chrFilter[j];
            v += vSrc[j][i] * (unsigned)chrFilter[j];
        }

        /* interleaved semi-planar layout: U then V per pixel */
        output_pixel(&dest[2*i] , u, 0x8000, int);
        output_pixel(&dest[2*i+1], v, 0x8000, int);
    }
}
212 
213 static av_always_inline void
214 yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
215 {
216  static const int big_endian = HAVE_BIGENDIAN;
217  static const int shift = 3;
218  static const float float_mult = 1.0f / 65535.0f;
219  int i, val;
220  uint16_t val_uint;
221 
222  for (i = 0; i < dstW; ++i){
223  val = src[i] + (1 << (shift - 1));
224  output_pixel(&val_uint, val, 0, uint);
225  dest[i] = float_mult * (float)val_uint;
226  }
227 }
228 
229 static av_always_inline void
230 yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
231 {
232  static const int big_endian = HAVE_BIGENDIAN;
233  static const int shift = 3;
234  static const float float_mult = 1.0f / 65535.0f;
235  int i, val;
236  uint16_t val_uint;
237 
238  for (i = 0; i < dstW; ++i){
239  val = src[i] + (1 << (shift - 1));
240  output_pixel(&val_uint, val, 0, uint);
241  dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
242  }
243 }
244 
/**
 * Vertically filter several 19-bit input lines into one line of native-endian
 * floats normalized to [0,1].  Uses the same overflow-compensation trick as
 * yuv2planeX_16_c_template (subtract 0x40000000, re-add via the 0x8000 bias).
 */
static av_always_inline void
yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src,
                            float *dest, int dstW)
{
    static const int big_endian = HAVE_BIGENDIAN;
    static const int shift = 15;
    static const float float_mult = 1.0f / 65535.0f;
    int i, j, val;
    uint16_t val_uint;

    for (i = 0; i < dstW; ++i){
        val = (1 << (shift - 1)) - 0x40000000;
        for (j = 0; j < filterSize; ++j){
            val += src[j][i] * (unsigned)filter[j];
        }
        output_pixel(&val_uint, val, 0x8000, int);
        dest[i] = float_mult * (float)val_uint;
    }
}
264 
/**
 * Same as yuv2planeX_float_c_template, but byte-swaps each 32-bit float so
 * the output has the opposite of the native endianness.
 */
static av_always_inline void
yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src,
                                  uint32_t *dest, int dstW)
{
    static const int big_endian = HAVE_BIGENDIAN;
    static const int shift = 15;
    static const float float_mult = 1.0f / 65535.0f;
    int i, j, val;
    uint16_t val_uint;

    for (i = 0; i < dstW; ++i){
        val = (1 << (shift - 1)) - 0x40000000;
        for (j = 0; j < filterSize; ++j){
            val += src[j][i] * (unsigned)filter[j];
        }
        output_pixel(&val_uint, val, 0x8000, int);
        dest[i] = av_bswap32(av_float2int(float_mult * (float)val_uint));
    }
}
284 
/* Generate a yuv2plane1 wrapper with the generic yuv2planar1_fn signature
 * around one of the float templates above. */
#define yuv2plane1_float(template, dest_type, BE_LE) \
static void yuv2plane1_float ## BE_LE ## _c(const int16_t *src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template((const int32_t *)src, (dest_type *)dest, dstW); \
}
291 
/* Generate a yuv2planeX wrapper with the generic yuv2planarX_fn signature
 * around one of the float templates above. */
#define yuv2planeX_float(template, dest_type, BE_LE) \
static void yuv2planeX_float ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                                            const int16_t **src, uint8_t *dest, int dstW, \
                                            const uint8_t *dither, int offset) \
{ \
    template(filter, filterSize, (const int32_t **)src, (dest_type *)dest, dstW); \
}
299 
300 #if HAVE_BIGENDIAN
305 #else
307 yuv2plane1_float(yuv2plane1_float_bswap_c_template, uint32_t, BE)
308 yuv2planeX_float(yuv2planeX_float_c_template, float, LE)
309 yuv2planeX_float(yuv2planeX_float_bswap_c_template, uint32_t, BE)
310 #endif
311 
312 #undef output_pixel
313 
/* Store one sample of 'output_bits' width: clip (val >> shift) to that many
 * bits and write it in the requested byte order.  Relies on 'big_endian',
 * 'shift' and 'output_bits' variables in the caller. */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
320 
321 static av_always_inline void
322 yuv2plane1_10_c_template(const int16_t *src, uint16_t *dest, int dstW,
323  int big_endian, int output_bits)
324 {
325  int i;
326  int shift = 15 - output_bits;
327 
328  for (i = 0; i < dstW; i++) {
329  int val = src[i] + (1 << (shift - 1));
330  output_pixel(&dest[i], val);
331  }
332 }
333 
/**
 * Vertically filter several 15-bit input lines into one output line of
 * 'output_bits' width (9..14 bit formats).
 */
static av_always_inline void
yuv2planeX_10_c_template(const int16_t *filter, int filterSize,
                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)
{
    int i;
    /* 15-bit samples * 12-bit coefficients, shifted down to output_bits */
    int shift = 11 + 16 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = 1 << (shift - 1); /* rounding constant */
        int j;

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];

        output_pixel(&dest[i], val);
    }
}
352 
353 #undef output_pixel
354 
/* Instantiate per-depth, per-endianness yuv2plane1/yuv2planeX functions from
 * the templates above.  'template_size' selects the template (the 16-bit one
 * reads 32-bit intermediates, hence typeX_t). */
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
                              uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}\
static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                              const int16_t **src, uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2planeX_## template_size ## _c_template(filter, \
                         filterSize, (const typeX_t **) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}
yuv2NBPS( 9, BE, 1, 10, int16_t)
yuv2NBPS( 9, LE, 0, 10, int16_t)
yuv2NBPS(10, BE, 1, 10, int16_t)
yuv2NBPS(10, LE, 0, 10, int16_t)
yuv2NBPS(12, BE, 1, 10, int16_t)
yuv2NBPS(12, LE, 0, 10, int16_t)
yuv2NBPS(14, BE, 1, 10, int16_t)
yuv2NBPS(14, LE, 0, 10, int16_t)
yuv2NBPS(16, BE, 1, 16, int32_t)
yuv2NBPS(16, LE, 0, 16, int32_t)
381 
/**
 * Vertically filter several 15-bit input lines into one 8-bit output line,
 * applying ordered dither from the 8-entry dither row.
 */
static void yuv2planeX_8_c(const int16_t *filter, int filterSize,
                           const int16_t **src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    int i, j;

    for (i = 0; i < dstW; i++) {
        /* seed the accumulator with the per-pixel dither value, pre-shifted
         * to the fixed-point scale of the filtered sum */
        int acc = dither[(i + offset) & 7] << 12;

        for (j = 0; j < filterSize; j++)
            acc += src[j][i] * filter[j];

        dest[i] = av_clip_uint8(acc >> 19);
    }
}
396 
/**
 * Write one line of unscaled 15-bit intermediates as dithered 8-bit samples.
 */
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW,
                           const uint8_t *dither, int offset)
{
    int i;

    for (i = 0; i < dstW; i++) {
        /* add the 7-bit dither value before dropping the low 7 bits */
        int sample = (src[i] + dither[(i + offset) & 7]) >> 7;
        dest[i] = av_clip_uint8(sample);
    }
}
406 
/**
 * Vertically filter chroma into one interleaved 8-bit UV line for the
 * semi-planar formats.  NV12/NV24 store U first; the other branch (NV21/NV42)
 * stores V first — the two loops differ only in that swap.
 */
static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                         const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest, int chrDstW)
{
    int i;

    if (dstFormat == AV_PIX_FMT_NV12 ||
        dstFormat == AV_PIX_FMT_NV24)
        for (i=0; i<chrDstW; i++) {
            /* V uses a dither phase offset of 3 to decorrelate it from U */
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;
            int j;
            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }

            dest[2*i]= av_clip_uint8(u>>19);
            dest[2*i+1]= av_clip_uint8(v>>19);
        }
    else
        for (i=0; i<chrDstW; i++) {
            int u = chrDither[i & 7] << 12;
            int v = chrDither[(i + 3) & 7] << 12;
            int j;
            for (j=0; j<chrFilterSize; j++) {
                u += chrUSrc[j][i] * chrFilter[j];
                v += chrVSrc[j][i] * chrFilter[j];
            }

            /* swapped plane order: V first, then U */
            dest[2*i]= av_clip_uint8(v>>19);
            dest[2*i+1]= av_clip_uint8(u>>19);
        }
}
442 
443 
/* P010 sample store: clip (val >> shift) to 10 bits and place it in the top
 * 10 bits of a 16-bit word (low 6 bits zero), in the requested byte order.
 * Relies on 'big_endian' and 'shift' variables in the caller. */
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, 10) << 6); \
    }
450 
/**
 * Write one luma line of unscaled 15-bit intermediates as P010 samples
 * (10 significant bits in the high part of each 16-bit word).
 */
static void yuv2p010l1_c(const int16_t *src,
                         uint16_t *dest, int dstW,
                         int big_endian)
{
    const int shift = 5;                /* 15-bit -> 10-bit */
    const int rnd   = 1 << (shift - 1); /* round to nearest */
    int i;

    for (i = 0; i < dstW; i++) {
        int sample = src[i] + rnd;
        output_pixel(&dest[i], sample);
    }
}
463 
/**
 * Vertically filter several 15-bit input lines into one P010 luma line.
 */
static void yuv2p010lX_c(const int16_t *filter, int filterSize,
                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian)
{
    const int shift = 17; /* 15-bit samples * 12-bit coeffs -> 10-bit out */
    int i, j;

    for (i = 0; i < dstW; i++) {
        int acc = 1 << (shift - 1); /* rounding constant */

        for (j = 0; j < filterSize; j++)
            acc += src[j][i] * filter[j];

        output_pixel(&dest[i], acc);
    }
}
480 
/**
 * Vertically filter chroma into one interleaved UV line of P010.
 */
static void yuv2p010cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither,
                         const int16_t *chrFilter, int chrFilterSize,
                         const int16_t **chrUSrc, const int16_t **chrVSrc,
                         uint8_t *dest8, int chrDstW)
{
    uint16_t *dest = (uint16_t*)dest8;
    int shift = 17;
    int big_endian = dstFormat == AV_PIX_FMT_P010BE;
    int i, j;

    for (i = 0; i < chrDstW; i++) {
        int u = 1 << (shift - 1); /* rounding constants */
        int v = 1 << (shift - 1);

        for (j = 0; j < chrFilterSize; j++) {
            u += chrUSrc[j][i] * chrFilter[j];
            v += chrVSrc[j][i] * chrFilter[j];
        }

        /* interleaved semi-planar layout: U then V per pixel */
        output_pixel(&dest[2*i] , u);
        output_pixel(&dest[2*i+1], v);
    }
}
504 
/* yuv2planar1_fn wrapper: P010 luma, little-endian. */
static void yuv2p010l1_LE_c(const int16_t *src,
                            uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010l1_c(src, (uint16_t*)dest, dstW, 0);
}
511 
/* yuv2planar1_fn wrapper: P010 luma, big-endian. */
static void yuv2p010l1_BE_c(const int16_t *src,
                            uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010l1_c(src, (uint16_t*)dest, dstW, 1);
}
518 
/* yuv2planarX_fn wrapper: P010 luma filtering, little-endian. */
static void yuv2p010lX_LE_c(const int16_t *filter, int filterSize,
                            const int16_t **src, uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 0);
}
525 
/* yuv2planarX_fn wrapper: P010 luma filtering, big-endian. */
static void yuv2p010lX_BE_c(const int16_t *filter, int filterSize,
                            const int16_t **src, uint8_t *dest, int dstW,
                            const uint8_t *dither, int offset)
{
    yuv2p010lX_c(filter, filterSize, src, (uint16_t*)dest, dstW, 1);
}
532 
533 #undef output_pixel
534 
535 
/* Shift one more pixel into the 8-pixel accumulator; the new bit is set when
 * the dithered value reaches the 234 threshold. */
#define accumulate_bit(acc, val) \
    acc <<= 1; \
    acc |= (val) >= 234
/* Emit one packed byte of 8 monochrome pixels; MONOWHITE uses inverted
 * polarity relative to MONOBLACK. */
#define output_pixel(pos, acc) \
    if (target == AV_PIX_FMT_MONOBLACK) { \
        pos = acc; \
    } else { \
        pos = ~acc; \
    }
545 
546 static av_always_inline void
547 yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter,
548  const int16_t **lumSrc, int lumFilterSize,
549  const int16_t *chrFilter, const int16_t **chrUSrc,
550  const int16_t **chrVSrc, int chrFilterSize,
551  const int16_t **alpSrc, uint8_t *dest, int dstW,
552  int y, enum AVPixelFormat target)
553 {
554  const uint8_t * const d128 = ff_dither_8x8_220[y&7];
555  int i;
556  unsigned acc = 0;
557  int err = 0;
558 
559  for (i = 0; i < dstW; i += 2) {
560  int j;
561  int Y1 = 1 << 18;
562  int Y2 = 1 << 18;
563 
564  for (j = 0; j < lumFilterSize; j++) {
565  Y1 += lumSrc[j][i] * lumFilter[j];
566  Y2 += lumSrc[j][i+1] * lumFilter[j];
567  }
568  Y1 >>= 19;
569  Y2 >>= 19;
570  if ((Y1 | Y2) & 0x100) {
571  Y1 = av_clip_uint8(Y1);
572  Y2 = av_clip_uint8(Y2);
573  }
574  if (c->dither == SWS_DITHER_ED) {
575  Y1 += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
576  c->dither_error[0][i] = err;
577  acc = 2*acc + (Y1 >= 128);
578  Y1 -= 220*(acc&1);
579 
580  err = Y2 + ((7*Y1 + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4);
581  c->dither_error[0][i+1] = Y1;
582  acc = 2*acc + (err >= 128);
583  err -= 220*(acc&1);
584  } else {
585  accumulate_bit(acc, Y1 + d128[(i + 0) & 7]);
586  accumulate_bit(acc, Y2 + d128[(i + 1) & 7]);
587  }
588  if ((i & 7) == 6) {
589  output_pixel(*dest++, acc);
590  }
591  }
592  c->dither_error[0][i] = err;
593 
594  if (i & 6) {
595  output_pixel(*dest, acc);
596  }
597 }
598 
599 static av_always_inline void
600 yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2],
601  const int16_t *ubuf[2], const int16_t *vbuf[2],
602  const int16_t *abuf[2], uint8_t *dest, int dstW,
603  int yalpha, int uvalpha, int y,
604  enum AVPixelFormat target)
605 {
606  const int16_t *buf0 = buf[0], *buf1 = buf[1];
607  const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
608  int yalpha1 = 4096 - yalpha;
609  int i;
610  av_assert2(yalpha <= 4096U);
611 
612  if (c->dither == SWS_DITHER_ED) {
613  int err = 0;
614  int acc = 0;
615  for (i = 0; i < dstW; i +=2) {
616  int Y;
617 
618  Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
619  Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
620  c->dither_error[0][i] = err;
621  acc = 2*acc + (Y >= 128);
622  Y -= 220*(acc&1);
623 
624  err = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
625  err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
626  c->dither_error[0][i+1] = Y;
627  acc = 2*acc + (err >= 128);
628  err -= 220*(acc&1);
629 
630  if ((i & 7) == 6)
631  output_pixel(*dest++, acc);
632  }
633  c->dither_error[0][i] = err;
634  } else {
635  for (i = 0; i < dstW; i += 8) {
636  int Y, acc = 0;
637 
638  Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
639  accumulate_bit(acc, Y + d128[0]);
640  Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
641  accumulate_bit(acc, Y + d128[1]);
642  Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
643  accumulate_bit(acc, Y + d128[2]);
644  Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
645  accumulate_bit(acc, Y + d128[3]);
646  Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
647  accumulate_bit(acc, Y + d128[4]);
648  Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
649  accumulate_bit(acc, Y + d128[5]);
650  Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
651  accumulate_bit(acc, Y + d128[6]);
652  Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
653  accumulate_bit(acc, Y + d128[7]);
654 
655  output_pixel(*dest++, acc);
656  }
657  }
658 }
659 
660 static av_always_inline void
661 yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0,
662  const int16_t *ubuf[2], const int16_t *vbuf[2],
663  const int16_t *abuf0, uint8_t *dest, int dstW,
664  int uvalpha, int y, enum AVPixelFormat target)
665 {
666  const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
667  int i;
668 
669  if (c->dither == SWS_DITHER_ED) {
670  int err = 0;
671  int acc = 0;
672  for (i = 0; i < dstW; i +=2) {
673  int Y;
674 
675  Y = ((buf0[i + 0] + 64) >> 7);
676  Y += (7*err + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2] + 8 - 256)>>4;
677  c->dither_error[0][i] = err;
678  acc = 2*acc + (Y >= 128);
679  Y -= 220*(acc&1);
680 
681  err = ((buf0[i + 1] + 64) >> 7);
682  err += (7*Y + 1*c->dither_error[0][i+1] + 5*c->dither_error[0][i+2] + 3*c->dither_error[0][i+3] + 8 - 256)>>4;
683  c->dither_error[0][i+1] = Y;
684  acc = 2*acc + (err >= 128);
685  err -= 220*(acc&1);
686 
687  if ((i & 7) == 6)
688  output_pixel(*dest++, acc);
689  }
690  c->dither_error[0][i] = err;
691  } else {
692  for (i = 0; i < dstW; i += 8) {
693  int acc = 0;
694  accumulate_bit(acc, ((buf0[i + 0] + 64) >> 7) + d128[0]);
695  accumulate_bit(acc, ((buf0[i + 1] + 64) >> 7) + d128[1]);
696  accumulate_bit(acc, ((buf0[i + 2] + 64) >> 7) + d128[2]);
697  accumulate_bit(acc, ((buf0[i + 3] + 64) >> 7) + d128[3]);
698  accumulate_bit(acc, ((buf0[i + 4] + 64) >> 7) + d128[4]);
699  accumulate_bit(acc, ((buf0[i + 5] + 64) >> 7) + d128[5]);
700  accumulate_bit(acc, ((buf0[i + 6] + 64) >> 7) + d128[6]);
701  accumulate_bit(acc, ((buf0[i + 7] + 64) >> 7) + d128[7]);
702 
703  output_pixel(*dest++, acc);
704  }
705  }
706 }
707 
708 #undef output_pixel
709 #undef accumulate_bit
710 
/* Generate the three packed-output entry points (_X_c: full vertical filter,
 * _2_c: two-line blend, _1_c: single unscaled line) for one pixel format by
 * delegating to the matching *_c_template with 'fmt' as compile-time target. */
#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
                                  y, fmt); \
}
742 
743 YUV2PACKEDWRAPPER(yuv2mono,, white, AV_PIX_FMT_MONOWHITE)
744 YUV2PACKEDWRAPPER(yuv2mono,, black, AV_PIX_FMT_MONOBLACK)
745 
/* Store one 2-pixel group of packed 4:2:2 in the byte order of the
 * compile-time 'target' format (YUYV, YVYU or UYVY). */
#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == AV_PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = U; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = V; \
    } else if (target == AV_PIX_FMT_YVYU422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = V; \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = U; \
    } else { /* AV_PIX_FMT_UYVY422 */ \
        dest[pos + 0] = U; \
        dest[pos + 1] = Y1; \
        dest[pos + 2] = V; \
        dest[pos + 3] = Y2; \
    }
763 
764 static av_always_inline void
765 yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter,
766  const int16_t **lumSrc, int lumFilterSize,
767  const int16_t *chrFilter, const int16_t **chrUSrc,
768  const int16_t **chrVSrc, int chrFilterSize,
769  const int16_t **alpSrc, uint8_t *dest, int dstW,
770  int y, enum AVPixelFormat target)
771 {
772  int i;
773 
774  for (i = 0; i < ((dstW + 1) >> 1); i++) {
775  int j;
776  int Y1 = 1 << 18;
777  int Y2 = 1 << 18;
778  int U = 1 << 18;
779  int V = 1 << 18;
780 
781  for (j = 0; j < lumFilterSize; j++) {
782  Y1 += lumSrc[j][i * 2] * lumFilter[j];
783  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
784  }
785  for (j = 0; j < chrFilterSize; j++) {
786  U += chrUSrc[j][i] * chrFilter[j];
787  V += chrVSrc[j][i] * chrFilter[j];
788  }
789  Y1 >>= 19;
790  Y2 >>= 19;
791  U >>= 19;
792  V >>= 19;
793  if ((Y1 | Y2 | U | V) & 0x100) {
794  Y1 = av_clip_uint8(Y1);
795  Y2 = av_clip_uint8(Y2);
796  U = av_clip_uint8(U);
797  V = av_clip_uint8(V);
798  }
799  output_pixels(4*i, Y1, U, Y2, V);
800  }
801 }
802 
803 static av_always_inline void
804 yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2],
805  const int16_t *ubuf[2], const int16_t *vbuf[2],
806  const int16_t *abuf[2], uint8_t *dest, int dstW,
807  int yalpha, int uvalpha, int y,
808  enum AVPixelFormat target)
809 {
810  const int16_t *buf0 = buf[0], *buf1 = buf[1],
811  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
812  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
813  int yalpha1 = 4096 - yalpha;
814  int uvalpha1 = 4096 - uvalpha;
815  int i;
816  av_assert2(yalpha <= 4096U);
817  av_assert2(uvalpha <= 4096U);
818 
819  for (i = 0; i < ((dstW + 1) >> 1); i++) {
820  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
821  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
822  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
823  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
824 
825  if ((Y1 | Y2 | U | V) & 0x100) {
826  Y1 = av_clip_uint8(Y1);
827  Y2 = av_clip_uint8(Y2);
828  U = av_clip_uint8(U);
829  V = av_clip_uint8(V);
830  }
831 
832  output_pixels(i * 4, Y1, U, Y2, V);
833  }
834 }
835 
836 static av_always_inline void
837 yuv2422_1_c_template(SwsContext *c, const int16_t *buf0,
838  const int16_t *ubuf[2], const int16_t *vbuf[2],
839  const int16_t *abuf0, uint8_t *dest, int dstW,
840  int uvalpha, int y, enum AVPixelFormat target)
841 {
842  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
843  int i;
844 
845  if (uvalpha < 2048) {
846  for (i = 0; i < ((dstW + 1) >> 1); i++) {
847  int Y1 = (buf0[i * 2 ]+64) >> 7;
848  int Y2 = (buf0[i * 2 + 1]+64) >> 7;
849  int U = (ubuf0[i] +64) >> 7;
850  int V = (vbuf0[i] +64) >> 7;
851 
852  if ((Y1 | Y2 | U | V) & 0x100) {
853  Y1 = av_clip_uint8(Y1);
854  Y2 = av_clip_uint8(Y2);
855  U = av_clip_uint8(U);
856  V = av_clip_uint8(V);
857  }
858 
859  output_pixels(i * 4, Y1, U, Y2, V);
860  }
861  } else {
862  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
863  for (i = 0; i < ((dstW + 1) >> 1); i++) {
864  int Y1 = (buf0[i * 2 ] + 64) >> 7;
865  int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
866  int U = (ubuf0[i] + ubuf1[i]+128) >> 8;
867  int V = (vbuf0[i] + vbuf1[i]+128) >> 8;
868 
869  if ((Y1 | Y2 | U | V) & 0x100) {
870  Y1 = av_clip_uint8(Y1);
871  Y2 = av_clip_uint8(Y2);
872  U = av_clip_uint8(U);
873  V = av_clip_uint8(V);
874  }
875 
876  output_pixels(i * 4, Y1, U, Y2, V);
877  }
878  }
879 }
880 
881 #undef output_pixels
882 
883 YUV2PACKEDWRAPPER(yuv2, 422, yuyv422, AV_PIX_FMT_YUYV422)
884 YUV2PACKEDWRAPPER(yuv2, 422, yvyu422, AV_PIX_FMT_YVYU422)
885 YUV2PACKEDWRAPPER(yuv2, 422, uyvy422, AV_PIX_FMT_UYVY422)
886 
/* For RGB48/RGBA64 the component order is R,G,B; for the BGR variants the
 * R and B accumulators are swapped at store time via R_B/B_R. */
#define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? R : B)
#define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE || target == AV_PIX_FMT_RGBA64LE || target == AV_PIX_FMT_RGBA64BE) ? B : R)
/* Store one 16-bit component in the byte order of the target format. */
#define output_pixel(pos, val) \
    if (isBE(target)) { \
        AV_WB16(pos, val); \
    } else { \
        AV_WL16(pos, val); \
    }
895 
896 static av_always_inline void
897 yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter,
898  const int32_t **lumSrc, int lumFilterSize,
899  const int16_t *chrFilter, const int32_t **unused_chrUSrc,
900  const int32_t **unused_chrVSrc, int unused_chrFilterSize,
901  const int32_t **alpSrc, uint16_t *dest, int dstW,
902  int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
903 {
904  int hasAlpha = !!alpSrc;
905  int i;
906 
907  for (i = 0; i < dstW; i++) {
908  int j;
909  int Y = -0x40000000;
910  int A = 0xffff;
911 
912  for (j = 0; j < lumFilterSize; j++)
913  Y += lumSrc[j][i] * lumFilter[j];
914 
915  Y >>= 15;
916  Y += (1<<3) + 0x8000;
917  Y = av_clip_uint16(Y);
918 
919  if (hasAlpha) {
920  A = -0x40000000 + (1<<14);
921  for (j = 0; j < lumFilterSize; j++)
922  A += alpSrc[j][i] * lumFilter[j];
923 
924  A >>= 15;
925  A += 0x8000;
926  A = av_clip_uint16(A);
927  }
928 
929  output_pixel(&dest[2 * i ], Y);
930  output_pixel(&dest[2 * i + 1], A);
931  }
932 }
933 
934 static av_always_inline void
936  const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
937  const int32_t *abuf[2], uint16_t *dest, int dstW,
938  int yalpha, int unused_uvalpha, int y,
939  enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
940 {
941  int hasAlpha = abuf && abuf[0] && abuf[1];
942  const int32_t *buf0 = buf[0], *buf1 = buf[1],
943  *abuf0 = hasAlpha ? abuf[0] : NULL,
944  *abuf1 = hasAlpha ? abuf[1] : NULL;
945  int yalpha1 = 4096 - yalpha;
946  int i;
947 
948  av_assert2(yalpha <= 4096U);
949 
950  for (i = 0; i < dstW; i++) {
951  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 15;
952  int A;
953 
954  Y = av_clip_uint16(Y);
955 
956  if (hasAlpha) {
957  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 15;
958  A = av_clip_uint16(A);
959  }
960 
961  output_pixel(&dest[2 * i ], Y);
962  output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
963  }
964 }
965 
966 static av_always_inline void
968  const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2],
969  const int32_t *abuf0, uint16_t *dest, int dstW,
970  int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
971 {
972  int hasAlpha = !!abuf0;
973  int i;
974 
975  for (i = 0; i < dstW; i++) {
976  int Y = buf0[i] >> 3;/* 19 - 16 */
977  int A;
978 
979  Y = av_clip_uint16(Y);
980 
981  if (hasAlpha) {
982  A = abuf0[i] >> 3;
983  if (A & 0x100)
984  A = av_clip_uint16(A);
985  }
986 
987  output_pixel(&dest[2 * i ], Y);
988  output_pixel(&dest[2 * i + 1], hasAlpha ? A : 65535);
989  }
990 }
991 
/**
 * Vertically filter luma/chroma/alpha and convert to 16-bit-per-component
 * RGB48 or RGBA64 (eightbytes selects the alpha variant; R_B/B_R swap the
 * channel order for BGR targets).  Processes two horizontal pixels per
 * iteration since they share one U/V pair.  The -0x40000000 accumulator
 * offsets are the overflow-compensation trick from yuv2planeX_16_c_template;
 * trailing // comments track the fixed-point bit depth through the pipeline.
 */
static av_always_inline void
yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter,
                       const int32_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int32_t **chrUSrc,
                       const int32_t **chrVSrc, int chrFilterSize,
                       const int32_t **alpSrc, uint16_t *dest, int dstW,
                       int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
{
    int i;
    int A1 = 0xffff<<14, A2 = 0xffff<<14; /* fully opaque when !hasAlpha */

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int j;
        int Y1 = -0x40000000;
        int Y2 = -0x40000000;
        int U = -(128 << 23); // 19
        int V = -(128 << 23);
        int R, G, B;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2] * (unsigned)lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {;
            U += chrUSrc[j][i] * (unsigned)chrFilter[j];
            V += chrVSrc[j][i] * (unsigned)chrFilter[j];
        }

        if (hasAlpha) {
            A1 = -0x40000000;
            A2 = -0x40000000;
            for (j = 0; j < lumFilterSize; j++) {
                A1 += alpSrc[j][i * 2] * (unsigned)lumFilter[j];
                A2 += alpSrc[j][i * 2 + 1] * (unsigned)lumFilter[j];
            }
            A1 >>= 1;
            A1 += 0x20002000; /* undo the accumulator offset */
            A2 >>= 1;
            A2 += 0x20002000;
        }

        // 8 bits: 12+15=27; 16 bits: 12+19=31
        Y1 >>= 14; // 10
        Y1 += 0x10000;
        Y2 >>= 14;
        Y2 += 0x10000;
        U >>= 14;
        V >>= 14;

        // 8 bits: 27 -> 17 bits, 16 bits: 31 - 14 = 17 bits
        Y1 -= c->yuv2rgb_y_offset;
        Y2 -= c->yuv2rgb_y_offset;
        Y1 *= c->yuv2rgb_y_coeff;
        Y2 *= c->yuv2rgb_y_coeff;
        Y1 += 1 << 13; // 21
        Y2 += 1 << 13;
        // 8 bits: 17 + 13 bits = 30 bits, 16 bits: 17 + 13 bits = 30 bits

        R = V * c->yuv2rgb_v2r_coeff;
        G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = U * c->yuv2rgb_u2b_coeff;

        // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits
        output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
        output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
        output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
        if (eightbytes) {
            output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
            output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
            output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
            output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
            output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
            dest += 8;
        } else {
            output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
            output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
            output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
            dest += 6;
        }
    }
}
1073 
1074 static av_always_inline void
1076  const int32_t *ubuf[2], const int32_t *vbuf[2],
1077  const int32_t *abuf[2], uint16_t *dest, int dstW,
1078  int yalpha, int uvalpha, int y,
1079  enum AVPixelFormat target, int hasAlpha, int eightbytes)
1080 {
1081  const int32_t *buf0 = buf[0], *buf1 = buf[1],
1082  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1083  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1084  *abuf0 = hasAlpha ? abuf[0] : NULL,
1085  *abuf1 = hasAlpha ? abuf[1] : NULL;
1086  int yalpha1 = 4096 - yalpha;
1087  int uvalpha1 = 4096 - uvalpha;
1088  int i;
1089  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1090 
1091  av_assert2(yalpha <= 4096U);
1092  av_assert2(uvalpha <= 4096U);
1093 
1094  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1095  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
1096  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
1097  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1098  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1099  int R, G, B;
1100 
1101  Y1 -= c->yuv2rgb_y_offset;
1102  Y2 -= c->yuv2rgb_y_offset;
1103  Y1 *= c->yuv2rgb_y_coeff;
1104  Y2 *= c->yuv2rgb_y_coeff;
1105  Y1 += 1 << 13;
1106  Y2 += 1 << 13;
1107 
1108  R = V * c->yuv2rgb_v2r_coeff;
1109  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1110  B = U * c->yuv2rgb_u2b_coeff;
1111 
1112  if (hasAlpha) {
1113  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 1;
1114  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 1;
1115 
1116  A1 += 1 << 13;
1117  A2 += 1 << 13;
1118  }
1119 
1120  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1121  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1122  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1123  if (eightbytes) {
1124  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1125  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1126  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1127  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1128  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1129  dest += 8;
1130  } else {
1131  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1132  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1133  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1134  dest += 6;
1135  }
1136  }
1137 }
1138 
1139 static av_always_inline void
1141  const int32_t *ubuf[2], const int32_t *vbuf[2],
1142  const int32_t *abuf0, uint16_t *dest, int dstW,
1143  int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1144 {
1145  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1146  int i;
1147  int A1 = 0xffff<<14, A2= 0xffff<<14;
1148 
1149  if (uvalpha < 2048) {
1150  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1151  int Y1 = (buf0[i * 2] ) >> 2;
1152  int Y2 = (buf0[i * 2 + 1]) >> 2;
1153  int U = (ubuf0[i] - (128 << 11)) >> 2;
1154  int V = (vbuf0[i] - (128 << 11)) >> 2;
1155  int R, G, B;
1156 
1157  Y1 -= c->yuv2rgb_y_offset;
1158  Y2 -= c->yuv2rgb_y_offset;
1159  Y1 *= c->yuv2rgb_y_coeff;
1160  Y2 *= c->yuv2rgb_y_coeff;
1161  Y1 += 1 << 13;
1162  Y2 += 1 << 13;
1163 
1164  if (hasAlpha) {
1165  A1 = abuf0[i * 2 ] << 11;
1166  A2 = abuf0[i * 2 + 1] << 11;
1167 
1168  A1 += 1 << 13;
1169  A2 += 1 << 13;
1170  }
1171 
1172  R = V * c->yuv2rgb_v2r_coeff;
1173  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1174  B = U * c->yuv2rgb_u2b_coeff;
1175 
1176  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1177  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1178  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1179  if (eightbytes) {
1180  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1181  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1182  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1183  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1184  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1185  dest += 8;
1186  } else {
1187  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1188  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1189  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1190  dest += 6;
1191  }
1192  }
1193  } else {
1194  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1195  int A1 = 0xffff<<14, A2 = 0xffff<<14;
1196  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1197  int Y1 = (buf0[i * 2] ) >> 2;
1198  int Y2 = (buf0[i * 2 + 1]) >> 2;
1199  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1200  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1201  int R, G, B;
1202 
1203  Y1 -= c->yuv2rgb_y_offset;
1204  Y2 -= c->yuv2rgb_y_offset;
1205  Y1 *= c->yuv2rgb_y_coeff;
1206  Y2 *= c->yuv2rgb_y_coeff;
1207  Y1 += 1 << 13;
1208  Y2 += 1 << 13;
1209 
1210  if (hasAlpha) {
1211  A1 = abuf0[i * 2 ] << 11;
1212  A2 = abuf0[i * 2 + 1] << 11;
1213 
1214  A1 += 1 << 13;
1215  A2 += 1 << 13;
1216  }
1217 
1218  R = V * c->yuv2rgb_v2r_coeff;
1219  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1220  B = U * c->yuv2rgb_u2b_coeff;
1221 
1222  output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14);
1223  output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);
1224  output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14);
1225  if (eightbytes) {
1226  output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14);
1227  output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14);
1228  output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14);
1229  output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14);
1230  output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14);
1231  dest += 8;
1232  } else {
1233  output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14);
1234  output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
1235  output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14);
1236  dest += 6;
1237  }
1238  }
1239  }
1240 }
1241 
1242 static av_always_inline void
1243 yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
1244  const int32_t **lumSrc, int lumFilterSize,
1245  const int16_t *chrFilter, const int32_t **chrUSrc,
1246  const int32_t **chrVSrc, int chrFilterSize,
1247  const int32_t **alpSrc, uint16_t *dest, int dstW,
1248  int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1249 {
1250  int i;
1251  int A = 0xffff<<14;
1252 
1253  for (i = 0; i < dstW; i++) {
1254  int j;
1255  int Y = -0x40000000;
1256  int U = -(128 << 23); // 19
1257  int V = -(128 << 23);
1258  int R, G, B;
1259 
1260  for (j = 0; j < lumFilterSize; j++) {
1261  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
1262  }
1263  for (j = 0; j < chrFilterSize; j++) {;
1264  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
1265  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
1266  }
1267 
1268  if (hasAlpha) {
1269  A = -0x40000000;
1270  for (j = 0; j < lumFilterSize; j++) {
1271  A += alpSrc[j][i] * (unsigned)lumFilter[j];
1272  }
1273  A >>= 1;
1274  A += 0x20002000;
1275  }
1276 
1277  // 8bit: 12+15=27; 16-bit: 12+19=31
1278  Y >>= 14; // 10
1279  Y += 0x10000;
1280  U >>= 14;
1281  V >>= 14;
1282 
1283  // 8bit: 27 -> 17bit, 16bit: 31 - 14 = 17bit
1284  Y -= c->yuv2rgb_y_offset;
1285  Y *= c->yuv2rgb_y_coeff;
1286  Y += 1 << 13; // 21
1287  // 8bit: 17 + 13bit = 30bit, 16bit: 17 + 13bit = 30bit
1288 
1289  R = V * c->yuv2rgb_v2r_coeff;
1290  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1291  B = U * c->yuv2rgb_u2b_coeff;
1292 
1293  // 8bit: 30 - 22 = 8bit, 16bit: 30bit - 14 = 16bit
1294  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1295  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1296  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1297  if (eightbytes) {
1298  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1299  dest += 4;
1300  } else {
1301  dest += 3;
1302  }
1303  }
1304 }
1305 
1306 static av_always_inline void
1308  const int32_t *ubuf[2], const int32_t *vbuf[2],
1309  const int32_t *abuf[2], uint16_t *dest, int dstW,
1310  int yalpha, int uvalpha, int y,
1311  enum AVPixelFormat target, int hasAlpha, int eightbytes)
1312 {
1313  const int32_t *buf0 = buf[0], *buf1 = buf[1],
1314  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1315  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1316  *abuf0 = hasAlpha ? abuf[0] : NULL,
1317  *abuf1 = hasAlpha ? abuf[1] : NULL;
1318  int yalpha1 = 4096 - yalpha;
1319  int uvalpha1 = 4096 - uvalpha;
1320  int i;
1321  int A = 0xffff<<14;
1322 
1323  av_assert2(yalpha <= 4096U);
1324  av_assert2(uvalpha <= 4096U);
1325 
1326  for (i = 0; i < dstW; i++) {
1327  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 14;
1328  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha - (128 << 23)) >> 14;
1329  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha - (128 << 23)) >> 14;
1330  int R, G, B;
1331 
1332  Y -= c->yuv2rgb_y_offset;
1333  Y *= c->yuv2rgb_y_coeff;
1334  Y += 1 << 13;
1335 
1336  R = V * c->yuv2rgb_v2r_coeff;
1337  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1338  B = U * c->yuv2rgb_u2b_coeff;
1339 
1340  if (hasAlpha) {
1341  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 1;
1342 
1343  A += 1 << 13;
1344  }
1345 
1346  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1347  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1348  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1349  if (eightbytes) {
1350  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1351  dest += 4;
1352  } else {
1353  dest += 3;
1354  }
1355  }
1356 }
1357 
1358 static av_always_inline void
1360  const int32_t *ubuf[2], const int32_t *vbuf[2],
1361  const int32_t *abuf0, uint16_t *dest, int dstW,
1362  int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
1363 {
1364  const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1365  int i;
1366  int A = 0xffff<<14;
1367 
1368  if (uvalpha < 2048) {
1369  for (i = 0; i < dstW; i++) {
1370  int Y = (buf0[i]) >> 2;
1371  int U = (ubuf0[i] - (128 << 11)) >> 2;
1372  int V = (vbuf0[i] - (128 << 11)) >> 2;
1373  int R, G, B;
1374 
1375  Y -= c->yuv2rgb_y_offset;
1376  Y *= c->yuv2rgb_y_coeff;
1377  Y += 1 << 13;
1378 
1379  if (hasAlpha) {
1380  A = abuf0[i] << 11;
1381 
1382  A += 1 << 13;
1383  }
1384 
1385  R = V * c->yuv2rgb_v2r_coeff;
1386  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1387  B = U * c->yuv2rgb_u2b_coeff;
1388 
1389  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1390  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1391  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1392  if (eightbytes) {
1393  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1394  dest += 4;
1395  } else {
1396  dest += 3;
1397  }
1398  }
1399  } else {
1400  const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1401  int A = 0xffff<<14;
1402  for (i = 0; i < dstW; i++) {
1403  int Y = (buf0[i] ) >> 2;
1404  int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3;
1405  int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3;
1406  int R, G, B;
1407 
1408  Y -= c->yuv2rgb_y_offset;
1409  Y *= c->yuv2rgb_y_coeff;
1410  Y += 1 << 13;
1411 
1412  if (hasAlpha) {
1413  A = abuf0[i] << 11;
1414 
1415  A += 1 << 13;
1416  }
1417 
1418  R = V * c->yuv2rgb_v2r_coeff;
1419  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
1420  B = U * c->yuv2rgb_u2b_coeff;
1421 
1422  output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14);
1423  output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14);
1424  output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14);
1425  if (eightbytes) {
1426  output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14);
1427  dest += 4;
1428  } else {
1429  dest += 3;
1430  }
1431  }
1432  }
1433 }
1434 
/* End of the 16-bit packed-RGB template section: retire its helper macros. */
#undef output_pixel
#undef r_b
#undef b_r
1438 
/*
 * Instantiate the three vertical-output entry points (_X: multi-tap
 * filter, _2: two-line blend, _1: single line) for one packed 16-bit
 * output format.  The wrappers cast the generic int16_t* scaler
 * buffers to the int32_t layout the 16-bit templates expect and
 * forward all parameters, baking fmt/hasAlpha/eightbytes in as
 * compile-time constants so each template specializes fully.
 */
#define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                        const int16_t **_lumSrc, int lumFilterSize, \
                        const int16_t *chrFilter, const int16_t **_chrUSrc, \
                        const int16_t **_chrVSrc, int chrFilterSize, \
                        const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
                        int y) \
{ \
    const int32_t **lumSrc  = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc  = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                          chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                          alpSrc, dest, dstW, y, fmt, hasAlpha, eightbytes); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                        int yalpha, int uvalpha, int y) \
{ \
    const int32_t **buf  = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                          dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha, eightbytes); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                        const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                        const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                        int uvalpha, int y) \
{ \
    const int32_t *buf0  = (const int32_t *)  _buf0, \
                 **ubuf  = (const int32_t **) _ubuf, \
                 **vbuf  = (const int32_t **) _vbuf, \
                  *abuf0 = (const int32_t *)  _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha, eightbytes); \
}
1484 
/* Packed 16-bit output instantiations: 48-bit RGB/BGR (no alpha),
 * 64-bit RGBA/BGRA (x-variants write the alpha slot but ignore the
 * alpha plane) and 16-bit gray+alpha. */
YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48be, AV_PIX_FMT_RGB48BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgb48le, AV_PIX_FMT_RGB48LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48be, AV_PIX_FMT_BGR48BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgr48le, AV_PIX_FMT_BGR48LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64be, AV_PIX_FMT_RGBA64BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgba64le, AV_PIX_FMT_RGBA64LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64be, AV_PIX_FMT_RGBA64BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, rgbx64le, AV_PIX_FMT_RGBA64LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64be, AV_PIX_FMT_BGRA64BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgra64le, AV_PIX_FMT_BGRA64LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64be, AV_PIX_FMT_BGRA64BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64, bgrx64le, AV_PIX_FMT_BGRA64LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, ya16, ya16be, AV_PIX_FMT_YA16BE, 1, 0)
YUV2PACKED16WRAPPER(yuv2, ya16, ya16le, AV_PIX_FMT_YA16LE, 1, 0)

/* Same set built on the full-chroma ("_full") templates (one pixel per
 * iteration instead of chroma shared across a pixel pair). */
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48be_full, AV_PIX_FMT_RGB48BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgb48le_full, AV_PIX_FMT_RGB48LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48be_full, AV_PIX_FMT_BGR48BE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgr48le_full, AV_PIX_FMT_BGR48LE, 0, 0)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64be_full, AV_PIX_FMT_RGBA64BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgba64le_full, AV_PIX_FMT_RGBA64LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64be_full, AV_PIX_FMT_RGBA64BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, rgbx64le_full, AV_PIX_FMT_RGBA64LE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64be_full, AV_PIX_FMT_BGRA64BE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgra64le_full, AV_PIX_FMT_BGRA64LE, 1, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64be_full, AV_PIX_FMT_BGRA64BE, 0, 1)
YUV2PACKED16WRAPPER(yuv2, rgba64_full, bgrx64le_full, AV_PIX_FMT_BGRA64LE, 0, 1)
1512 
1513 /*
1514  * Write out 2 RGB pixels in the target pixel format. This function takes a
1515  * R/G/B LUT as generated by ff_yuv2rgb_c_init_tables(), which takes care of
1516  * things like endianness conversion and shifting. The caller takes care of
1517  * setting the correct offset in these tables from the chroma (U/V) values.
1518  * This function then uses the luminance (Y1/Y2) values to write out the
1519  * correct RGB values into the destination buffer.
1520  */
static av_always_inline void
yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2,
              unsigned A1, unsigned A2,
              const void *_r, const void *_g, const void *_b, int y,
              enum AVPixelFormat target, int hasAlpha)
{
    /* 32-bit formats: each LUT entry holds the component already shifted
     * into its byte lane, so one pixel is the sum of three entries. */
    if (target == AV_PIX_FMT_ARGB || target == AV_PIX_FMT_RGBA ||
        target == AV_PIX_FMT_ABGR || target == AV_PIX_FMT_BGRA) {
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

#if CONFIG_SMALL
        /* size-optimized build: single code path, alpha shift chosen at run time */
        int sh = hasAlpha ? ((target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24) : 0;

        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);
#else
        if (hasAlpha) {
            /* _1 formats carry alpha in the low byte, others in the top byte */
            int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;

            /* the alpha lane of the RGB LUT sum must be empty before OR-ing A in */
            av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0);
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);
        } else {
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
            int sh = (target == AV_PIX_FMT_RGB32_1 || target == AV_PIX_FMT_BGR32_1) ? 0 : 24;

            /* without alpha the tables already contain an opaque alpha lane */
            av_assert2((((r[Y1] + g[Y1] + b[Y1]) >> sh) & 0xFF) == 0xFF);
#endif
            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
        }
#endif
    } else if (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) {
        /* 24-bit: three separate byte stores per pixel */
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;

#define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
#define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)

        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] =   g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] =   g[Y2];
        dest[i * 6 + 5] = b_r[Y2];
#undef r_b
#undef b_r
    } else if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565 ||
               target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555 ||
               target == AV_PIX_FMT_RGB444 || target == AV_PIX_FMT_BGR444) {
        /* 16-bit packed formats: per-component ordered-dither offsets are
         * added to the luma index before the LUT lookup */
        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB565 || target == AV_PIX_FMT_BGR565) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_4[ y & 1     ][0];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_4[ y & 1     ][1];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else if (target == AV_PIX_FMT_RGB555 || target == AV_PIX_FMT_BGR555) {
            dr1 = ff_dither_2x2_8[ y & 1     ][0];
            dg1 = ff_dither_2x2_8[ y & 1     ][1];
            db1 = ff_dither_2x2_8[(y & 1) ^ 1][0];
            dr2 = ff_dither_2x2_8[ y & 1     ][1];
            dg2 = ff_dither_2x2_8[ y & 1     ][0];
            db2 = ff_dither_2x2_8[(y & 1) ^ 1][1];
        } else {
            /* 444: 4 bits per component, 4x4 dither matrix */
            dr1 = ff_dither_4x4_16[ y & 3     ][0];
            dg1 = ff_dither_4x4_16[ y & 3     ][1];
            db1 = ff_dither_4x4_16[(y & 3) ^ 3][0];
            dr2 = ff_dither_4x4_16[ y & 3     ][1];
            dg2 = ff_dither_4x4_16[ y & 3     ][0];
            db2 = ff_dither_4x4_16[(y & 3) ^ 3][1];
        }

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
    } else if (target == AV_PIX_FMT_X2RGB10) {
        /* 10-bit-per-component packed into 32 bits, no dithering */
        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;
        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
    } else /* 8/4 bits */ {
        uint8_t *dest = (uint8_t *) _dest;
        const uint8_t *r = (const uint8_t *) _r;
        const uint8_t *g = (const uint8_t *) _g;
        const uint8_t *b = (const uint8_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        if (target == AV_PIX_FMT_RGB8 || target == AV_PIX_FMT_BGR8) {
            const uint8_t * const d64 = ff_dither_8x8_73[y & 7];
            const uint8_t * const d32 = ff_dither_8x8_32[y & 7];
            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1 =       d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2 =       d64[(i * 2 + 1) & 7];
        } else {
            const uint8_t * const d64  = ff_dither_8x8_73 [y & 7];
            const uint8_t * const d128 = ff_dither_8x8_220[y & 7];
            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1 =        d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2 =        d64[(i * 2 + 1) & 7];
        }

        if (target == AV_PIX_FMT_RGB4 || target == AV_PIX_FMT_BGR4) {
            /* two 4-bit pixels per output byte */
            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                      ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);
        } else {
            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
        }
    }
}
1646 
1647 static av_always_inline void
1648 yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter,
1649  const int16_t **lumSrc, int lumFilterSize,
1650  const int16_t *chrFilter, const int16_t **chrUSrc,
1651  const int16_t **chrVSrc, int chrFilterSize,
1652  const int16_t **alpSrc, uint8_t *dest, int dstW,
1653  int y, enum AVPixelFormat target, int hasAlpha)
1654 {
1655  int i;
1656 
1657  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1658  int j, A1, A2;
1659  int Y1 = 1 << 18;
1660  int Y2 = 1 << 18;
1661  int U = 1 << 18;
1662  int V = 1 << 18;
1663  const void *r, *g, *b;
1664 
1665  for (j = 0; j < lumFilterSize; j++) {
1666  Y1 += lumSrc[j][i * 2] * lumFilter[j];
1667  Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];
1668  }
1669  for (j = 0; j < chrFilterSize; j++) {
1670  U += chrUSrc[j][i] * chrFilter[j];
1671  V += chrVSrc[j][i] * chrFilter[j];
1672  }
1673  Y1 >>= 19;
1674  Y2 >>= 19;
1675  U >>= 19;
1676  V >>= 19;
1677  if (hasAlpha) {
1678  A1 = 1 << 18;
1679  A2 = 1 << 18;
1680  for (j = 0; j < lumFilterSize; j++) {
1681  A1 += alpSrc[j][i * 2 ] * lumFilter[j];
1682  A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];
1683  }
1684  A1 >>= 19;
1685  A2 >>= 19;
1686  if ((A1 | A2) & 0x100) {
1687  A1 = av_clip_uint8(A1);
1688  A2 = av_clip_uint8(A2);
1689  }
1690  }
1691 
1692  r = c->table_rV[V + YUVRGB_TABLE_HEADROOM];
1694  b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1695 
1696  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1697  r, g, b, y, target, hasAlpha);
1698  }
1699 }
1700 
1701 static av_always_inline void
1702 yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2],
1703  const int16_t *ubuf[2], const int16_t *vbuf[2],
1704  const int16_t *abuf[2], uint8_t *dest, int dstW,
1705  int yalpha, int uvalpha, int y,
1706  enum AVPixelFormat target, int hasAlpha)
1707 {
1708  const int16_t *buf0 = buf[0], *buf1 = buf[1],
1709  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
1710  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
1711  *abuf0 = hasAlpha ? abuf[0] : NULL,
1712  *abuf1 = hasAlpha ? abuf[1] : NULL;
1713  int yalpha1 = 4096 - yalpha;
1714  int uvalpha1 = 4096 - uvalpha;
1715  int i;
1716  av_assert2(yalpha <= 4096U);
1717  av_assert2(uvalpha <= 4096U);
1718 
1719  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1720  int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
1721  int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
1722  int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
1723  int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;
1724  int A1, A2;
1725  const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1727  *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1728 
1729  if (hasAlpha) {
1730  A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
1731  A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
1732  A1 = av_clip_uint8(A1);
1733  A2 = av_clip_uint8(A2);
1734  }
1735 
1736  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1737  r, g, b, y, target, hasAlpha);
1738  }
1739 }
1740 
1741 static av_always_inline void
1742 yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0,
1743  const int16_t *ubuf[2], const int16_t *vbuf[2],
1744  const int16_t *abuf0, uint8_t *dest, int dstW,
1745  int uvalpha, int y, enum AVPixelFormat target,
1746  int hasAlpha)
1747 {
1748  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
1749  int i;
1750 
1751  if (uvalpha < 2048) {
1752  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1753  int Y1 = (buf0[i * 2 ] + 64) >> 7;
1754  int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1755  int U = (ubuf0[i] + 64) >> 7;
1756  int V = (vbuf0[i] + 64) >> 7;
1757  int A1, A2;
1758  const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1760  *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1761 
1762  if (hasAlpha) {
1763  A1 = abuf0[i * 2 ] * 255 + 16384 >> 15;
1764  A2 = abuf0[i * 2 + 1] * 255 + 16384 >> 15;
1765  A1 = av_clip_uint8(A1);
1766  A2 = av_clip_uint8(A2);
1767  }
1768 
1769  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1770  r, g, b, y, target, hasAlpha);
1771  }
1772  } else {
1773  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
1774  for (i = 0; i < ((dstW + 1) >> 1); i++) {
1775  int Y1 = (buf0[i * 2 ] + 64) >> 7;
1776  int Y2 = (buf0[i * 2 + 1] + 64) >> 7;
1777  int U = (ubuf0[i] + ubuf1[i] + 128) >> 8;
1778  int V = (vbuf0[i] + vbuf1[i] + 128) >> 8;
1779  int A1, A2;
1780  const void *r = c->table_rV[V + YUVRGB_TABLE_HEADROOM],
1782  *b = c->table_bU[U + YUVRGB_TABLE_HEADROOM];
1783 
1784  if (hasAlpha) {
1785  A1 = (abuf0[i * 2 ] + 64) >> 7;
1786  A2 = (abuf0[i * 2 + 1] + 64) >> 7;
1787  A1 = av_clip_uint8(A1);
1788  A2 = av_clip_uint8(A2);
1789  }
1790 
1791  yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
1792  r, g, b, y, target, hasAlpha);
1793  }
1794  }
1795 }
1796 
/* Instantiate only the multi-tap ("_X") entry point for one 8-bit RGB
 * output format, baking fmt/hasAlpha in as compile-time constants. */
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \
}
1809 
/* As YUV2RGBWRAPPERX, plus the two-line blend ("_2") entry point. */
#define YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
}
1820 
/* As YUV2RGBWRAPPERX2, plus the unscaled ("_1") entry point — i.e. the
 * full set of three vertical-output functions for one format. */
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX2(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
}
1831 
/* 8-bit packed RGB instantiations.  With CONFIG_SMALL one pair of
 * functions passes CONFIG_SWSCALE_ALPHA && c->needAlpha as the
 * hasAlpha argument (evaluated inside the templates); otherwise
 * separate alpha (a32*) and no-alpha (x32*) variants are built. */
#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2rgb,, 32_1, AV_PIX_FMT_RGB32_1, CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2rgb,, 32, AV_PIX_FMT_RGB32, CONFIG_SWSCALE_ALPHA && c->needAlpha)
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPER(yuv2rgb,, a32_1, AV_PIX_FMT_RGB32_1, 1)
YUV2RGBWRAPPER(yuv2rgb,, a32, AV_PIX_FMT_RGB32, 1)
#endif
YUV2RGBWRAPPER(yuv2rgb,, x32_1, AV_PIX_FMT_RGB32_1, 0)
YUV2RGBWRAPPER(yuv2rgb,, x32, AV_PIX_FMT_RGB32, 0)
#endif
YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24, 0)
YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPER(yuv2rgb,, 16, AV_PIX_FMT_RGB565, 0)
YUV2RGBWRAPPER(yuv2rgb,, 15, AV_PIX_FMT_RGB555, 0)
YUV2RGBWRAPPER(yuv2rgb,, 12, AV_PIX_FMT_RGB444, 0)
YUV2RGBWRAPPER(yuv2rgb,, 8, AV_PIX_FMT_RGB8, 0)
YUV2RGBWRAPPER(yuv2rgb,, 4, AV_PIX_FMT_RGB4, 0)
YUV2RGBWRAPPER(yuv2rgb,, 4b, AV_PIX_FMT_RGB4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb, x2rgb10, AV_PIX_FMT_X2RGB10, 0)
1852 
1854  uint8_t *dest, int i, int Y, int A, int U, int V,
1855  int y, enum AVPixelFormat target, int hasAlpha, int err[4])
1856 {
1857  int R, G, B;
1858  int isrgb8 = target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8;
1859 
1860  Y -= c->yuv2rgb_y_offset;
1861  Y *= c->yuv2rgb_y_coeff;
1862  Y += 1 << 21;
1863  R = (unsigned)Y + V*c->yuv2rgb_v2r_coeff;
1864  G = (unsigned)Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
1865  B = (unsigned)Y + U*c->yuv2rgb_u2b_coeff;
1866  if ((R | G | B) & 0xC0000000) {
1867  R = av_clip_uintp2(R, 30);
1868  G = av_clip_uintp2(G, 30);
1869  B = av_clip_uintp2(B, 30);
1870  }
1871 
1872  switch(target) {
1873  case AV_PIX_FMT_ARGB:
1874  dest[0] = hasAlpha ? A : 255;
1875  dest[1] = R >> 22;
1876  dest[2] = G >> 22;
1877  dest[3] = B >> 22;
1878  break;
1879  case AV_PIX_FMT_RGB24:
1880  dest[0] = R >> 22;
1881  dest[1] = G >> 22;
1882  dest[2] = B >> 22;
1883  break;
1884  case AV_PIX_FMT_RGBA:
1885  dest[0] = R >> 22;
1886  dest[1] = G >> 22;
1887  dest[2] = B >> 22;
1888  dest[3] = hasAlpha ? A : 255;
1889  break;
1890  case AV_PIX_FMT_ABGR:
1891  dest[0] = hasAlpha ? A : 255;
1892  dest[1] = B >> 22;
1893  dest[2] = G >> 22;
1894  dest[3] = R >> 22;
1895  break;
1896  case AV_PIX_FMT_BGR24:
1897  dest[0] = B >> 22;
1898  dest[1] = G >> 22;
1899  dest[2] = R >> 22;
1900  break;
1901  case AV_PIX_FMT_BGRA:
1902  dest[0] = B >> 22;
1903  dest[1] = G >> 22;
1904  dest[2] = R >> 22;
1905  dest[3] = hasAlpha ? A : 255;
1906  break;
1907  case AV_PIX_FMT_BGR4_BYTE:
1908  case AV_PIX_FMT_RGB4_BYTE:
1909  case AV_PIX_FMT_BGR8:
1910  case AV_PIX_FMT_RGB8:
1911  {
1912  int r,g,b;
1913 
1914  switch (c->dither) {
1915  default:
1916  case SWS_DITHER_AUTO:
1917  case SWS_DITHER_ED:
1918  R >>= 22;
1919  G >>= 22;
1920  B >>= 22;
1921  R += (7*err[0] + 1*c->dither_error[0][i] + 5*c->dither_error[0][i+1] + 3*c->dither_error[0][i+2])>>4;
1922  G += (7*err[1] + 1*c->dither_error[1][i] + 5*c->dither_error[1][i+1] + 3*c->dither_error[1][i+2])>>4;
1923  B += (7*err[2] + 1*c->dither_error[2][i] + 5*c->dither_error[2][i+1] + 3*c->dither_error[2][i+2])>>4;
1924  c->dither_error[0][i] = err[0];
1925  c->dither_error[1][i] = err[1];
1926  c->dither_error[2][i] = err[2];
1927  r = R >> (isrgb8 ? 5 : 7);
1928  g = G >> (isrgb8 ? 5 : 6);
1929  b = B >> (isrgb8 ? 6 : 7);
1930  r = av_clip(r, 0, isrgb8 ? 7 : 1);
1931  g = av_clip(g, 0, isrgb8 ? 7 : 3);
1932  b = av_clip(b, 0, isrgb8 ? 3 : 1);
1933  err[0] = R - r*(isrgb8 ? 36 : 255);
1934  err[1] = G - g*(isrgb8 ? 36 : 85);
1935  err[2] = B - b*(isrgb8 ? 85 : 255);
1936  break;
1937  case SWS_DITHER_A_DITHER:
1938  if (isrgb8) {
1939  /* see http://pippin.gimp.org/a_dither/ for details/origin */
1940 #define A_DITHER(u,v) (((((u)+((v)*236))*119)&0xff))
1941  r = (((R >> 19) + A_DITHER(i,y) -96)>>8);
1942  g = (((G >> 19) + A_DITHER(i + 17,y) - 96)>>8);
1943  b = (((B >> 20) + A_DITHER(i + 17*2,y) -96)>>8);
1944  r = av_clip_uintp2(r, 3);
1945  g = av_clip_uintp2(g, 3);
1946  b = av_clip_uintp2(b, 2);
1947  } else {
1948  r = (((R >> 21) + A_DITHER(i,y)-256)>>8);
1949  g = (((G >> 19) + A_DITHER(i + 17,y)-256)>>8);
1950  b = (((B >> 21) + A_DITHER(i + 17*2,y)-256)>>8);
1951  r = av_clip_uintp2(r, 1);
1952  g = av_clip_uintp2(g, 2);
1953  b = av_clip_uintp2(b, 1);
1954  }
1955  break;
1956  case SWS_DITHER_X_DITHER:
1957  if (isrgb8) {
1958  /* see http://pippin.gimp.org/a_dither/ for details/origin */
1959 #define X_DITHER(u,v) (((((u)^((v)*237))*181)&0x1ff)/2)
1960  r = (((R >> 19) + X_DITHER(i,y) - 96)>>8);
1961  g = (((G >> 19) + X_DITHER(i + 17,y) - 96)>>8);
1962  b = (((B >> 20) + X_DITHER(i + 17*2,y) - 96)>>8);
1963  r = av_clip_uintp2(r, 3);
1964  g = av_clip_uintp2(g, 3);
1965  b = av_clip_uintp2(b, 2);
1966  } else {
1967  r = (((R >> 21) + X_DITHER(i,y)-256)>>8);
1968  g = (((G >> 19) + X_DITHER(i + 17,y)-256)>>8);
1969  b = (((B >> 21) + X_DITHER(i + 17*2,y)-256)>>8);
1970  r = av_clip_uintp2(r, 1);
1971  g = av_clip_uintp2(g, 2);
1972  b = av_clip_uintp2(b, 1);
1973  }
1974 
1975  break;
1976  }
1977 
1978  if(target == AV_PIX_FMT_BGR4_BYTE) {
1979  dest[0] = r + 2*g + 8*b;
1980  } else if(target == AV_PIX_FMT_RGB4_BYTE) {
1981  dest[0] = b + 2*g + 8*r;
1982  } else if(target == AV_PIX_FMT_BGR8) {
1983  dest[0] = r + 8*g + 64*b;
1984  } else if(target == AV_PIX_FMT_RGB8) {
1985  dest[0] = b + 4*g + 32*r;
1986  } else
1987  av_assert2(0);
1988  break;}
1989  }
1990 }
1991 
static av_always_inline void
yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter,
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,
                          const int16_t **alpSrc, uint8_t *dest,
                          int dstW, int y, enum AVPixelFormat target, int hasAlpha)
{
    /* Full-chroma packed-RGB output, X-tap vertical-filter variant: for each
     * destination pixel, run the full lumFilter/chrFilter over the input rows,
     * then hand the per-pixel Y/U/V (+optional A) to yuv2rgb_write_full().
     * 'target' is a compile-time constant via the YUV2RGBWRAPPER expansion,
     * so the per-format switches below fold away after inlining. */
    int i;
    /* Bytes per output pixel: 3 for 24-bit RGB/BGR, otherwise 4 (32-bit). */
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
    /* Running error-diffusion residuals (R, G, B) carried across the row. */
    int err[4] = {0};
    int A = 0; //init to silence warning

    /* Palette-ish 8-/4-bit formats pack one pixel per byte. */
    if(   target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
       || target == AV_PIX_FMT_BGR8      || target == AV_PIX_FMT_RGB8)
        step = 1;

    for (i = 0; i < dstW; i++) {
        int j;
        /* 1<<9 is the rounding bias for the >>10 below; chroma additionally
         * gets the -(128<<19) offset to re-center U/V around zero. */
        int Y = 1<<9;
        int U = (1<<9)-(128 << 19);
        int V = (1<<9)-(128 << 19);

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * lumFilter[j];
        }
        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];
        }
        Y >>= 10;
        U >>= 10;
        V >>= 10;
        if (hasAlpha) {
            A = 1 << 18; /* rounding bias for the >>19 */
            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * lumFilter[j];
            }
            A >>= 19;
            /* Only clip when out of 8-bit range (cheap fast path). */
            if (A & 0x100)
                A = av_clip_uint8(A);
        }
        yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
        dest += step;
    }
    /* Persist the end-of-row dither residuals for the next line. */
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
2041 
static av_always_inline void
yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2],
                          const int16_t *ubuf[2], const int16_t *vbuf[2],
                          const int16_t *abuf[2], uint8_t *dest, int dstW,
                          int yalpha, int uvalpha, int y,
                          enum AVPixelFormat target, int hasAlpha)
{
    /* Full-chroma packed-RGB output, 2-row bilinear variant: blend two input
     * rows with weights (4096 - alpha, alpha) and write each pixel through
     * yuv2rgb_write_full().  'target' is compile-time constant via the
     * YUV2RGBWRAPPER expansion. */
    const int16_t *buf0  = buf[0],  *buf1  = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1  = 4096 - yalpha;  /* complementary 12-bit blend weight */
    int uvalpha1 = 4096 - uvalpha;
    int i;
    /* Bytes per output pixel: 3 for 24-bit RGB/BGR, otherwise 4 (32-bit). */
    int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
    /* Running error-diffusion residuals (R, G, B) carried across the row. */
    int err[4] = {0};
    int A = 0; // init to silence warning

    av_assert2(yalpha  <= 4096U);
    av_assert2(uvalpha <= 4096U);

    /* Palette-ish 8-/4-bit formats pack one pixel per byte. */
    if(   target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
       || target == AV_PIX_FMT_BGR8      || target == AV_PIX_FMT_RGB8)
        step = 1;

    for (i = 0; i < dstW; i++) {
        int Y = ( buf0[i] * yalpha1  +  buf1[i] * yalpha             ) >> 10; //FIXME rounding
        /* -(128<<19) re-centers chroma around zero before the shift. */
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha-(128 << 19)) >> 10;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha-(128 << 19)) >> 10;

        if (hasAlpha) {
            A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha + (1<<18)) >> 19;
            /* Only clip when out of 8-bit range (cheap fast path). */
            if (A & 0x100)
                A = av_clip_uint8(A);
        }

        yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
        dest += step;
    }
    /* Persist the end-of-row dither residuals for the next line. */
    c->dither_error[0][i] = err[0];
    c->dither_error[1][i] = err[1];
    c->dither_error[2][i] = err[2];
}
2086 
2087 static av_always_inline void
2089  const int16_t *ubuf[2], const int16_t *vbuf[2],
2090  const int16_t *abuf0, uint8_t *dest, int dstW,
2091  int uvalpha, int y, enum AVPixelFormat target,
2092  int hasAlpha)
2093 {
2094  const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
2095  int i;
2096  int step = (target == AV_PIX_FMT_RGB24 || target == AV_PIX_FMT_BGR24) ? 3 : 4;
2097  int err[4] = {0};
2098 
2099  if( target == AV_PIX_FMT_BGR4_BYTE || target == AV_PIX_FMT_RGB4_BYTE
2100  || target == AV_PIX_FMT_BGR8 || target == AV_PIX_FMT_RGB8)
2101  step = 1;
2102 
2103  if (uvalpha < 2048) {
2104  int A = 0; //init to silence warning
2105  for (i = 0; i < dstW; i++) {
2106  int Y = buf0[i] * 4;
2107  int U = (ubuf0[i] - (128<<7)) * 4;
2108  int V = (vbuf0[i] - (128<<7)) * 4;
2109 
2110  if (hasAlpha) {
2111  A = (abuf0[i] + 64) >> 7;
2112  if (A & 0x100)
2113  A = av_clip_uint8(A);
2114  }
2115 
2116  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2117  dest += step;
2118  }
2119  } else {
2120  const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
2121  int A = 0; //init to silence warning
2122  for (i = 0; i < dstW; i++) {
2123  int Y = buf0[i] * 4;
2124  int U = (ubuf0[i] + ubuf1[i] - (128<<8)) * 2;
2125  int V = (vbuf0[i] + vbuf1[i] - (128<<8)) * 2;
2126 
2127  if (hasAlpha) {
2128  A = (abuf0[i] + 64) >> 7;
2129  if (A & 0x100)
2130  A = av_clip_uint8(A);
2131  }
2132 
2133  yuv2rgb_write_full(c, dest, i, Y, A, U, V, y, target, hasAlpha, err);
2134  dest += step;
2135  }
2136  }
2137 
2138  c->dither_error[0][i] = err[0];
2139  c->dither_error[1][i] = err[1];
2140  c->dither_error[2][i] = err[2];
2141 }
2142 
/* Instantiate the full-chroma packed-RGB writers (yuv2*_full_{1,2,X}_c) from
 * the *_full_*_c_template functions above.  With CONFIG_SMALL, alpha support
 * is decided at run time (c->needAlpha); otherwise separate alpha and
 * no-alpha ("x") variants are generated to keep the hot loops branch-free. */
#if CONFIG_SMALL
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA,  CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR,  CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA,  CONFIG_SWSCALE_ALPHA && c->needAlpha)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB,  CONFIG_SWSCALE_ALPHA && c->needAlpha)
#else
#if CONFIG_SWSCALE_ALPHA
YUV2RGBWRAPPER(yuv2, rgb_full, bgra32_full, AV_PIX_FMT_BGRA,  1)
YUV2RGBWRAPPER(yuv2, rgb_full, abgr32_full, AV_PIX_FMT_ABGR,  1)
YUV2RGBWRAPPER(yuv2, rgb_full, rgba32_full, AV_PIX_FMT_RGBA,  1)
YUV2RGBWRAPPER(yuv2, rgb_full, argb32_full, AV_PIX_FMT_ARGB,  1)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA,  0)
YUV2RGBWRAPPER(yuv2, rgb_full, xbgr32_full, AV_PIX_FMT_ABGR,  0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA,  0)
YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB,  0)
#endif
YUV2RGBWRAPPER(yuv2, rgb_full, bgr24_full,  AV_PIX_FMT_BGR24, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb24_full,  AV_PIX_FMT_RGB24, 0)

YUV2RGBWRAPPER(yuv2, rgb_full, bgr4_byte_full,  AV_PIX_FMT_BGR4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb4_byte_full,  AV_PIX_FMT_RGB4_BYTE, 0)
YUV2RGBWRAPPER(yuv2, rgb_full, bgr8_full,   AV_PIX_FMT_BGR8,  0)
YUV2RGBWRAPPER(yuv2, rgb_full, rgb8_full,   AV_PIX_FMT_RGB8,  0)
2167 
2168 static void
2169 yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter,
2170  const int16_t **lumSrc, int lumFilterSize,
2171  const int16_t *chrFilter, const int16_t **chrUSrc,
2172  const int16_t **chrVSrc, int chrFilterSize,
2173  const int16_t **alpSrc, uint8_t **dest,
2174  int dstW, int y)
2175 {
2177  int i;
2178  int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrc;
2179  uint16_t **dest16 = (uint16_t**)dest;
2180  int SH = 22 + 8 - desc->comp[0].depth;
2181  int A = 0; // init to silence warning
2182 
2183  for (i = 0; i < dstW; i++) {
2184  int j;
2185  int Y = 1 << 9;
2186  int U = (1 << 9) - (128 << 19);
2187  int V = (1 << 9) - (128 << 19);
2188  int R, G, B;
2189 
2190  for (j = 0; j < lumFilterSize; j++)
2191  Y += lumSrc[j][i] * lumFilter[j];
2192 
2193  for (j = 0; j < chrFilterSize; j++) {
2194  U += chrUSrc[j][i] * chrFilter[j];
2195  V += chrVSrc[j][i] * chrFilter[j];
2196  }
2197 
2198  Y >>= 10;
2199  U >>= 10;
2200  V >>= 10;
2201 
2202  if (hasAlpha) {
2203  A = 1 << 18;
2204 
2205  for (j = 0; j < lumFilterSize; j++)
2206  A += alpSrc[j][i] * lumFilter[j];
2207 
2208  if (A & 0xF8000000)
2209  A = av_clip_uintp2(A, 27);
2210  }
2211 
2212  Y -= c->yuv2rgb_y_offset;
2213  Y *= c->yuv2rgb_y_coeff;
2214  Y += 1 << (SH-1);
2215  R = Y + V * c->yuv2rgb_v2r_coeff;
2216  G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2217  B = Y + U * c->yuv2rgb_u2b_coeff;
2218 
2219  if ((R | G | B) & 0xC0000000) {
2220  R = av_clip_uintp2(R, 30);
2221  G = av_clip_uintp2(G, 30);
2222  B = av_clip_uintp2(B, 30);
2223  }
2224 
2225  if (SH != 22) {
2226  dest16[0][i] = G >> SH;
2227  dest16[1][i] = B >> SH;
2228  dest16[2][i] = R >> SH;
2229  if (hasAlpha)
2230  dest16[3][i] = A >> (SH - 3);
2231  } else {
2232  dest[0][i] = G >> 22;
2233  dest[1][i] = B >> 22;
2234  dest[2][i] = R >> 22;
2235  if (hasAlpha)
2236  dest[3][i] = A >> 19;
2237  }
2238  }
2239  if (SH != 22 && (!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2240  for (i = 0; i < dstW; i++) {
2241  dest16[0][i] = av_bswap16(dest16[0][i]);
2242  dest16[1][i] = av_bswap16(dest16[1][i]);
2243  dest16[2][i] = av_bswap16(dest16[2][i]);
2244  if (hasAlpha)
2245  dest16[3][i] = av_bswap16(dest16[3][i]);
2246  }
2247  }
2248 }
2249 
2250 static void
2251 yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter,
2252  const int16_t **lumSrcx, int lumFilterSize,
2253  const int16_t *chrFilter, const int16_t **chrUSrcx,
2254  const int16_t **chrVSrcx, int chrFilterSize,
2255  const int16_t **alpSrcx, uint8_t **dest,
2256  int dstW, int y)
2257 {
2259  int i;
2260  int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
2261  uint16_t **dest16 = (uint16_t**)dest;
2262  const int32_t **lumSrc = (const int32_t**)lumSrcx;
2263  const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
2264  const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
2265  const int32_t **alpSrc = (const int32_t**)alpSrcx;
2266 
2267  for (i = 0; i < dstW; i++) {
2268  int j;
2269  int Y = -0x40000000;
2270  int U = -(128 << 23);
2271  int V = -(128 << 23);
2272  int R, G, B, A;
2273 
2274  for (j = 0; j < lumFilterSize; j++)
2275  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2276 
2277  for (j = 0; j < chrFilterSize; j++) {
2278  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2279  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2280  }
2281 
2282  Y >>= 14;
2283  Y += 0x10000;
2284  U >>= 14;
2285  V >>= 14;
2286 
2287  if (hasAlpha) {
2288  A = -0x40000000;
2289 
2290  for (j = 0; j < lumFilterSize; j++)
2291  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2292 
2293  A >>= 1;
2294  A += 0x20002000;
2295  }
2296 
2297  Y -= c->yuv2rgb_y_offset;
2298  Y *= c->yuv2rgb_y_coeff;
2299  Y += 1 << 13;
2300  R = V * c->yuv2rgb_v2r_coeff;
2301  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2302  B = U * c->yuv2rgb_u2b_coeff;
2303 
2304  R = av_clip_uintp2(Y + R, 30);
2305  G = av_clip_uintp2(Y + G, 30);
2306  B = av_clip_uintp2(Y + B, 30);
2307 
2308  dest16[0][i] = G >> 14;
2309  dest16[1][i] = B >> 14;
2310  dest16[2][i] = R >> 14;
2311  if (hasAlpha)
2312  dest16[3][i] = av_clip_uintp2(A, 30) >> 14;
2313  }
2314  if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2315  for (i = 0; i < dstW; i++) {
2316  dest16[0][i] = av_bswap16(dest16[0][i]);
2317  dest16[1][i] = av_bswap16(dest16[1][i]);
2318  dest16[2][i] = av_bswap16(dest16[2][i]);
2319  if (hasAlpha)
2320  dest16[3][i] = av_bswap16(dest16[3][i]);
2321  }
2322  }
2323 }
2324 
2325 static void
2326 yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter,
2327  const int16_t **lumSrcx, int lumFilterSize,
2328  const int16_t *chrFilter, const int16_t **chrUSrcx,
2329  const int16_t **chrVSrcx, int chrFilterSize,
2330  const int16_t **alpSrcx, uint8_t **dest,
2331  int dstW, int y)
2332 {
2334  int i;
2335  int hasAlpha = (desc->flags & AV_PIX_FMT_FLAG_ALPHA) && alpSrcx;
2336  uint32_t **dest32 = (uint32_t**)dest;
2337  const int32_t **lumSrc = (const int32_t**)lumSrcx;
2338  const int32_t **chrUSrc = (const int32_t**)chrUSrcx;
2339  const int32_t **chrVSrc = (const int32_t**)chrVSrcx;
2340  const int32_t **alpSrc = (const int32_t**)alpSrcx;
2341  static const float float_mult = 1.0f / 65535.0f;
2342 
2343  for (i = 0; i < dstW; i++) {
2344  int j;
2345  int Y = -0x40000000;
2346  int U = -(128 << 23);
2347  int V = -(128 << 23);
2348  int R, G, B, A;
2349 
2350  for (j = 0; j < lumFilterSize; j++)
2351  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2352 
2353  for (j = 0; j < chrFilterSize; j++) {
2354  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2355  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2356  }
2357 
2358  Y >>= 14;
2359  Y += 0x10000;
2360  U >>= 14;
2361  V >>= 14;
2362 
2363  if (hasAlpha) {
2364  A = -0x40000000;
2365 
2366  for (j = 0; j < lumFilterSize; j++)
2367  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2368 
2369  A >>= 1;
2370  A += 0x20002000;
2371  }
2372 
2373  Y -= c->yuv2rgb_y_offset;
2374  Y *= c->yuv2rgb_y_coeff;
2375  Y += 1 << 13;
2376  R = V * c->yuv2rgb_v2r_coeff;
2377  G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
2378  B = U * c->yuv2rgb_u2b_coeff;
2379 
2380  R = av_clip_uintp2(Y + R, 30);
2381  G = av_clip_uintp2(Y + G, 30);
2382  B = av_clip_uintp2(Y + B, 30);
2383 
2384  dest32[0][i] = av_float2int(float_mult * (float)(G >> 14));
2385  dest32[1][i] = av_float2int(float_mult * (float)(B >> 14));
2386  dest32[2][i] = av_float2int(float_mult * (float)(R >> 14));
2387  if (hasAlpha)
2388  dest32[3][i] = av_float2int(float_mult * (float)(av_clip_uintp2(A, 30) >> 14));
2389  }
2390  if ((!isBE(c->dstFormat)) != (!HAVE_BIGENDIAN)) {
2391  for (i = 0; i < dstW; i++) {
2392  dest32[0][i] = av_bswap32(dest32[0][i]);
2393  dest32[1][i] = av_bswap32(dest32[1][i]);
2394  dest32[2][i] = av_bswap32(dest32[2][i]);
2395  if (hasAlpha)
2396  dest32[3][i] = av_bswap32(dest32[3][i]);
2397  }
2398  }
2399 }
2400 
2401 static void
2402 yuv2ya8_1_c(SwsContext *c, const int16_t *buf0,
2403  const int16_t *ubuf[2], const int16_t *vbuf[2],
2404  const int16_t *abuf0, uint8_t *dest, int dstW,
2405  int uvalpha, int y)
2406 {
2407  int hasAlpha = !!abuf0;
2408  int i;
2409 
2410  for (i = 0; i < dstW; i++) {
2411  int Y = (buf0[i] + 64) >> 7;
2412  int A;
2413 
2414  Y = av_clip_uint8(Y);
2415 
2416  if (hasAlpha) {
2417  A = (abuf0[i] + 64) >> 7;
2418  if (A & 0x100)
2419  A = av_clip_uint8(A);
2420  }
2421 
2422  dest[i * 2 ] = Y;
2423  dest[i * 2 + 1] = hasAlpha ? A : 255;
2424  }
2425 }
2426 
2427 static void
2428 yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2],
2429  const int16_t *ubuf[2], const int16_t *vbuf[2],
2430  const int16_t *abuf[2], uint8_t *dest, int dstW,
2431  int yalpha, int uvalpha, int y)
2432 {
2433  int hasAlpha = abuf && abuf[0] && abuf[1];
2434  const int16_t *buf0 = buf[0], *buf1 = buf[1],
2435  *abuf0 = hasAlpha ? abuf[0] : NULL,
2436  *abuf1 = hasAlpha ? abuf[1] : NULL;
2437  int yalpha1 = 4096 - yalpha;
2438  int i;
2439 
2440  av_assert2(yalpha <= 4096U);
2441 
2442  for (i = 0; i < dstW; i++) {
2443  int Y = (buf0[i] * yalpha1 + buf1[i] * yalpha) >> 19;
2444  int A;
2445 
2446  Y = av_clip_uint8(Y);
2447 
2448  if (hasAlpha) {
2449  A = (abuf0[i] * yalpha1 + abuf1[i] * yalpha) >> 19;
2450  A = av_clip_uint8(A);
2451  }
2452 
2453  dest[i * 2 ] = Y;
2454  dest[i * 2 + 1] = hasAlpha ? A : 255;
2455  }
2456 }
2457 
2458 static void
2459 yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter,
2460  const int16_t **lumSrc, int lumFilterSize,
2461  const int16_t *chrFilter, const int16_t **chrUSrc,
2462  const int16_t **chrVSrc, int chrFilterSize,
2463  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
2464 {
2465  int hasAlpha = !!alpSrc;
2466  int i;
2467 
2468  for (i = 0; i < dstW; i++) {
2469  int j;
2470  int Y = 1 << 18, A = 1 << 18;
2471 
2472  for (j = 0; j < lumFilterSize; j++)
2473  Y += lumSrc[j][i] * lumFilter[j];
2474 
2475  Y >>= 19;
2476  if (Y & 0x100)
2477  Y = av_clip_uint8(Y);
2478 
2479  if (hasAlpha) {
2480  for (j = 0; j < lumFilterSize; j++)
2481  A += alpSrc[j][i] * lumFilter[j];
2482 
2483  A >>= 19;
2484 
2485  if (A & 0x100)
2486  A = av_clip_uint8(A);
2487  }
2488 
2489  dest[2 * i ] = Y;
2490  dest[2 * i + 1] = hasAlpha ? A : 255;
2491  }
2492 }
2493 
2494 static void
2495 yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter,
2496  const int16_t **_lumSrc, int lumFilterSize,
2497  const int16_t *chrFilter, const int16_t **_chrUSrc,
2498  const int16_t **_chrVSrc, int chrFilterSize,
2499  const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
2500 {
2501  const int32_t **lumSrc = (const int32_t **) _lumSrc,
2502  **chrUSrc = (const int32_t **) _chrUSrc,
2503  **chrVSrc = (const int32_t **) _chrVSrc,
2504  **alpSrc = (const int32_t **) _alpSrc;
2505  int hasAlpha = !!alpSrc;
2506  int i;
2507 
2508  for (i = 0; i < dstW; i++) {
2509  int Y = 1 << 14, U = 1 << 14;
2510  int V = 1 << 14, A = 1 << 14;
2511  int j;
2512 
2513  Y -= 0x40000000;
2514  U -= 0x40000000;
2515  V -= 0x40000000;
2516  A -= 0x40000000;
2517 
2518  for (j = 0; j < lumFilterSize; j++)
2519  Y += lumSrc[j][i] * (unsigned)lumFilter[j];
2520 
2521  for (j = 0; j < chrFilterSize; j++)
2522  U += chrUSrc[j][i] * (unsigned)chrFilter[j];
2523 
2524  for (j = 0; j < chrFilterSize; j++)
2525  V += chrVSrc[j][i] * (unsigned)chrFilter[j];
2526 
2527  if (hasAlpha)
2528  for (j = 0; j < lumFilterSize; j++)
2529  A += alpSrc[j][i] * (unsigned)lumFilter[j];
2530 
2531  Y = 0x8000 + av_clip_int16(Y >> 15);
2532  U = 0x8000 + av_clip_int16(U >> 15);
2533  V = 0x8000 + av_clip_int16(V >> 15);
2534  A = 0x8000 + av_clip_int16(A >> 15);
2535 
2536  AV_WL16(dest + 8 * i, hasAlpha ? A : 65535);
2537  AV_WL16(dest + 8 * i + 2, Y);
2538  AV_WL16(dest + 8 * i + 4, U);
2539  AV_WL16(dest + 8 * i + 6, V);
2540  }
2541 }
2542 
2544  yuv2planar1_fn *yuv2plane1,
2546  yuv2interleavedX_fn *yuv2nv12cX,
2547  yuv2packed1_fn *yuv2packed1,
2548  yuv2packed2_fn *yuv2packed2,
2549  yuv2packedX_fn *yuv2packedX,
2550  yuv2anyX_fn *yuv2anyX)
2551 {
2552  enum AVPixelFormat dstFormat = c->dstFormat;
2553  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
2554 
2555  if (dstFormat == AV_PIX_FMT_P010LE || dstFormat == AV_PIX_FMT_P010BE) {
2556  *yuv2plane1 = isBE(dstFormat) ? yuv2p010l1_BE_c : yuv2p010l1_LE_c;
2557  *yuv2planeX = isBE(dstFormat) ? yuv2p010lX_BE_c : yuv2p010lX_LE_c;
2558  *yuv2nv12cX = yuv2p010cX_c;
2559  } else if (is16BPS(dstFormat)) {
2560  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
2561  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;
2562  if (dstFormat == AV_PIX_FMT_P016LE || dstFormat == AV_PIX_FMT_P016BE) {
2563  *yuv2nv12cX = yuv2p016cX_c;
2564  }
2565  } else if (isNBPS(dstFormat)) {
2566  if (desc->comp[0].depth == 9) {
2567  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
2568  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;
2569  } else if (desc->comp[0].depth == 10) {
2570  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
2571  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;
2572  } else if (desc->comp[0].depth == 12) {
2573  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_12BE_c : yuv2planeX_12LE_c;
2574  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_12BE_c : yuv2plane1_12LE_c;
2575  } else if (desc->comp[0].depth == 14) {
2576  *yuv2planeX = isBE(dstFormat) ? yuv2planeX_14BE_c : yuv2planeX_14LE_c;
2577  *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_14BE_c : yuv2plane1_14LE_c;
2578  } else
2579  av_assert0(0);
2580  } else if (dstFormat == AV_PIX_FMT_GRAYF32BE) {
2581  *yuv2planeX = yuv2planeX_floatBE_c;
2582  *yuv2plane1 = yuv2plane1_floatBE_c;
2583  } else if (dstFormat == AV_PIX_FMT_GRAYF32LE) {
2584  *yuv2planeX = yuv2planeX_floatLE_c;
2585  *yuv2plane1 = yuv2plane1_floatLE_c;
2586  } else {
2587  *yuv2plane1 = yuv2plane1_8_c;
2588  *yuv2planeX = yuv2planeX_8_c;
2589  if (dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21 ||
2590  dstFormat == AV_PIX_FMT_NV24 || dstFormat == AV_PIX_FMT_NV42)
2591  *yuv2nv12cX = yuv2nv12cX_c;
2592  }
2593 
2594  if(c->flags & SWS_FULL_CHR_H_INT) {
2595  switch (dstFormat) {
2596  case AV_PIX_FMT_RGBA:
2597 #if CONFIG_SMALL
2598  *yuv2packedX = yuv2rgba32_full_X_c;
2599  *yuv2packed2 = yuv2rgba32_full_2_c;
2600  *yuv2packed1 = yuv2rgba32_full_1_c;
2601 #else
2602 #if CONFIG_SWSCALE_ALPHA
2603  if (c->needAlpha) {
2604  *yuv2packedX = yuv2rgba32_full_X_c;
2605  *yuv2packed2 = yuv2rgba32_full_2_c;
2606  *yuv2packed1 = yuv2rgba32_full_1_c;
2607  } else
2608 #endif /* CONFIG_SWSCALE_ALPHA */
2609  {
2610  *yuv2packedX = yuv2rgbx32_full_X_c;
2611  *yuv2packed2 = yuv2rgbx32_full_2_c;
2612  *yuv2packed1 = yuv2rgbx32_full_1_c;
2613  }
2614 #endif /* !CONFIG_SMALL */
2615  break;
2616  case AV_PIX_FMT_ARGB:
2617 #if CONFIG_SMALL
2618  *yuv2packedX = yuv2argb32_full_X_c;
2619  *yuv2packed2 = yuv2argb32_full_2_c;
2620  *yuv2packed1 = yuv2argb32_full_1_c;
2621 #else
2622 #if CONFIG_SWSCALE_ALPHA
2623  if (c->needAlpha) {
2624  *yuv2packedX = yuv2argb32_full_X_c;
2625  *yuv2packed2 = yuv2argb32_full_2_c;
2626  *yuv2packed1 = yuv2argb32_full_1_c;
2627  } else
2628 #endif /* CONFIG_SWSCALE_ALPHA */
2629  {
2630  *yuv2packedX = yuv2xrgb32_full_X_c;
2631  *yuv2packed2 = yuv2xrgb32_full_2_c;
2632  *yuv2packed1 = yuv2xrgb32_full_1_c;
2633  }
2634 #endif /* !CONFIG_SMALL */
2635  break;
2636  case AV_PIX_FMT_BGRA:
2637 #if CONFIG_SMALL
2638  *yuv2packedX = yuv2bgra32_full_X_c;
2639  *yuv2packed2 = yuv2bgra32_full_2_c;
2640  *yuv2packed1 = yuv2bgra32_full_1_c;
2641 #else
2642 #if CONFIG_SWSCALE_ALPHA
2643  if (c->needAlpha) {
2644  *yuv2packedX = yuv2bgra32_full_X_c;
2645  *yuv2packed2 = yuv2bgra32_full_2_c;
2646  *yuv2packed1 = yuv2bgra32_full_1_c;
2647  } else
2648 #endif /* CONFIG_SWSCALE_ALPHA */
2649  {
2650  *yuv2packedX = yuv2bgrx32_full_X_c;
2651  *yuv2packed2 = yuv2bgrx32_full_2_c;
2652  *yuv2packed1 = yuv2bgrx32_full_1_c;
2653  }
2654 #endif /* !CONFIG_SMALL */
2655  break;
2656  case AV_PIX_FMT_ABGR:
2657 #if CONFIG_SMALL
2658  *yuv2packedX = yuv2abgr32_full_X_c;
2659  *yuv2packed2 = yuv2abgr32_full_2_c;
2660  *yuv2packed1 = yuv2abgr32_full_1_c;
2661 #else
2662 #if CONFIG_SWSCALE_ALPHA
2663  if (c->needAlpha) {
2664  *yuv2packedX = yuv2abgr32_full_X_c;
2665  *yuv2packed2 = yuv2abgr32_full_2_c;
2666  *yuv2packed1 = yuv2abgr32_full_1_c;
2667  } else
2668 #endif /* CONFIG_SWSCALE_ALPHA */
2669  {
2670  *yuv2packedX = yuv2xbgr32_full_X_c;
2671  *yuv2packed2 = yuv2xbgr32_full_2_c;
2672  *yuv2packed1 = yuv2xbgr32_full_1_c;
2673  }
2674 #endif /* !CONFIG_SMALL */
2675  break;
2676  case AV_PIX_FMT_RGBA64LE:
2677 #if CONFIG_SWSCALE_ALPHA
2678  if (c->needAlpha) {
2679  *yuv2packedX = yuv2rgba64le_full_X_c;
2680  *yuv2packed2 = yuv2rgba64le_full_2_c;
2681  *yuv2packed1 = yuv2rgba64le_full_1_c;
2682  } else
2683 #endif /* CONFIG_SWSCALE_ALPHA */
2684  {
2685  *yuv2packedX = yuv2rgbx64le_full_X_c;
2686  *yuv2packed2 = yuv2rgbx64le_full_2_c;
2687  *yuv2packed1 = yuv2rgbx64le_full_1_c;
2688  }
2689  break;
2690  case AV_PIX_FMT_RGBA64BE:
2691 #if CONFIG_SWSCALE_ALPHA
2692  if (c->needAlpha) {
2693  *yuv2packedX = yuv2rgba64be_full_X_c;
2694  *yuv2packed2 = yuv2rgba64be_full_2_c;
2695  *yuv2packed1 = yuv2rgba64be_full_1_c;
2696  } else
2697 #endif /* CONFIG_SWSCALE_ALPHA */
2698  {
2699  *yuv2packedX = yuv2rgbx64be_full_X_c;
2700  *yuv2packed2 = yuv2rgbx64be_full_2_c;
2701  *yuv2packed1 = yuv2rgbx64be_full_1_c;
2702  }
2703  break;
2704  case AV_PIX_FMT_BGRA64LE:
2705 #if CONFIG_SWSCALE_ALPHA
2706  if (c->needAlpha) {
2707  *yuv2packedX = yuv2bgra64le_full_X_c;
2708  *yuv2packed2 = yuv2bgra64le_full_2_c;
2709  *yuv2packed1 = yuv2bgra64le_full_1_c;
2710  } else
2711 #endif /* CONFIG_SWSCALE_ALPHA */
2712  {
2713  *yuv2packedX = yuv2bgrx64le_full_X_c;
2714  *yuv2packed2 = yuv2bgrx64le_full_2_c;
2715  *yuv2packed1 = yuv2bgrx64le_full_1_c;
2716  }
2717  break;
2718  case AV_PIX_FMT_BGRA64BE:
2719 #if CONFIG_SWSCALE_ALPHA
2720  if (c->needAlpha) {
2721  *yuv2packedX = yuv2bgra64be_full_X_c;
2722  *yuv2packed2 = yuv2bgra64be_full_2_c;
2723  *yuv2packed1 = yuv2bgra64be_full_1_c;
2724  } else
2725 #endif /* CONFIG_SWSCALE_ALPHA */
2726  {
2727  *yuv2packedX = yuv2bgrx64be_full_X_c;
2728  *yuv2packed2 = yuv2bgrx64be_full_2_c;
2729  *yuv2packed1 = yuv2bgrx64be_full_1_c;
2730  }
2731  break;
2732 
2733  case AV_PIX_FMT_RGB24:
2734  *yuv2packedX = yuv2rgb24_full_X_c;
2735  *yuv2packed2 = yuv2rgb24_full_2_c;
2736  *yuv2packed1 = yuv2rgb24_full_1_c;
2737  break;
2738  case AV_PIX_FMT_BGR24:
2739  *yuv2packedX = yuv2bgr24_full_X_c;
2740  *yuv2packed2 = yuv2bgr24_full_2_c;
2741  *yuv2packed1 = yuv2bgr24_full_1_c;
2742  break;
2743  case AV_PIX_FMT_RGB48LE:
2744  *yuv2packedX = yuv2rgb48le_full_X_c;
2745  *yuv2packed2 = yuv2rgb48le_full_2_c;
2746  *yuv2packed1 = yuv2rgb48le_full_1_c;
2747  break;
2748  case AV_PIX_FMT_BGR48LE:
2749  *yuv2packedX = yuv2bgr48le_full_X_c;
2750  *yuv2packed2 = yuv2bgr48le_full_2_c;
2751  *yuv2packed1 = yuv2bgr48le_full_1_c;
2752  break;
2753  case AV_PIX_FMT_RGB48BE:
2754  *yuv2packedX = yuv2rgb48be_full_X_c;
2755  *yuv2packed2 = yuv2rgb48be_full_2_c;
2756  *yuv2packed1 = yuv2rgb48be_full_1_c;
2757  break;
2758  case AV_PIX_FMT_BGR48BE:
2759  *yuv2packedX = yuv2bgr48be_full_X_c;
2760  *yuv2packed2 = yuv2bgr48be_full_2_c;
2761  *yuv2packed1 = yuv2bgr48be_full_1_c;
2762  break;
2763  case AV_PIX_FMT_BGR4_BYTE:
2764  *yuv2packedX = yuv2bgr4_byte_full_X_c;
2765  *yuv2packed2 = yuv2bgr4_byte_full_2_c;
2766  *yuv2packed1 = yuv2bgr4_byte_full_1_c;
2767  break;
2768  case AV_PIX_FMT_RGB4_BYTE:
2769  *yuv2packedX = yuv2rgb4_byte_full_X_c;
2770  *yuv2packed2 = yuv2rgb4_byte_full_2_c;
2771  *yuv2packed1 = yuv2rgb4_byte_full_1_c;
2772  break;
2773  case AV_PIX_FMT_BGR8:
2774  *yuv2packedX = yuv2bgr8_full_X_c;
2775  *yuv2packed2 = yuv2bgr8_full_2_c;
2776  *yuv2packed1 = yuv2bgr8_full_1_c;
2777  break;
2778  case AV_PIX_FMT_RGB8:
2779  *yuv2packedX = yuv2rgb8_full_X_c;
2780  *yuv2packed2 = yuv2rgb8_full_2_c;
2781  *yuv2packed1 = yuv2rgb8_full_1_c;
2782  break;
2783  case AV_PIX_FMT_GBRP:
2784  case AV_PIX_FMT_GBRP9BE:
2785  case AV_PIX_FMT_GBRP9LE:
2786  case AV_PIX_FMT_GBRP10BE:
2787  case AV_PIX_FMT_GBRP10LE:
2788  case AV_PIX_FMT_GBRP12BE:
2789  case AV_PIX_FMT_GBRP12LE:
2790  case AV_PIX_FMT_GBRP14BE:
2791  case AV_PIX_FMT_GBRP14LE:
2792  case AV_PIX_FMT_GBRAP:
2793  case AV_PIX_FMT_GBRAP10BE:
2794  case AV_PIX_FMT_GBRAP10LE:
2795  case AV_PIX_FMT_GBRAP12BE:
2796  case AV_PIX_FMT_GBRAP12LE:
2797  *yuv2anyX = yuv2gbrp_full_X_c;
2798  break;
2799  case AV_PIX_FMT_GBRP16BE:
2800  case AV_PIX_FMT_GBRP16LE:
2801  case AV_PIX_FMT_GBRAP16BE:
2802  case AV_PIX_FMT_GBRAP16LE:
2803  *yuv2anyX = yuv2gbrp16_full_X_c;
2804  break;
2805  case AV_PIX_FMT_GBRPF32BE:
2806  case AV_PIX_FMT_GBRPF32LE:
2807  case AV_PIX_FMT_GBRAPF32BE:
2808  case AV_PIX_FMT_GBRAPF32LE:
2809  *yuv2anyX = yuv2gbrpf32_full_X_c;
2810  break;
2811  }
2812  if (!*yuv2packedX && !*yuv2anyX)
2813  goto YUV_PACKED;
2814  } else {
2815  YUV_PACKED:
2816  switch (dstFormat) {
2817  case AV_PIX_FMT_RGBA64LE:
2818 #if CONFIG_SWSCALE_ALPHA
2819  if (c->needAlpha) {
2820  *yuv2packed1 = yuv2rgba64le_1_c;
2821  *yuv2packed2 = yuv2rgba64le_2_c;
2822  *yuv2packedX = yuv2rgba64le_X_c;
2823  } else
2824 #endif /* CONFIG_SWSCALE_ALPHA */
2825  {
2826  *yuv2packed1 = yuv2rgbx64le_1_c;
2827  *yuv2packed2 = yuv2rgbx64le_2_c;
2828  *yuv2packedX = yuv2rgbx64le_X_c;
2829  }
2830  break;
2831  case AV_PIX_FMT_RGBA64BE:
2832 #if CONFIG_SWSCALE_ALPHA
2833  if (c->needAlpha) {
2834  *yuv2packed1 = yuv2rgba64be_1_c;
2835  *yuv2packed2 = yuv2rgba64be_2_c;
2836  *yuv2packedX = yuv2rgba64be_X_c;
2837  } else
2838 #endif /* CONFIG_SWSCALE_ALPHA */
2839  {
2840  *yuv2packed1 = yuv2rgbx64be_1_c;
2841  *yuv2packed2 = yuv2rgbx64be_2_c;
2842  *yuv2packedX = yuv2rgbx64be_X_c;
2843  }
2844  break;
2845  case AV_PIX_FMT_BGRA64LE:
2846 #if CONFIG_SWSCALE_ALPHA
2847  if (c->needAlpha) {
2848  *yuv2packed1 = yuv2bgra64le_1_c;
2849  *yuv2packed2 = yuv2bgra64le_2_c;
2850  *yuv2packedX = yuv2bgra64le_X_c;
2851  } else
2852 #endif /* CONFIG_SWSCALE_ALPHA */
2853  {
2854  *yuv2packed1 = yuv2bgrx64le_1_c;
2855  *yuv2packed2 = yuv2bgrx64le_2_c;
2856  *yuv2packedX = yuv2bgrx64le_X_c;
2857  }
2858  break;
2859  case AV_PIX_FMT_BGRA64BE:
2860 #if CONFIG_SWSCALE_ALPHA
2861  if (c->needAlpha) {
2862  *yuv2packed1 = yuv2bgra64be_1_c;
2863  *yuv2packed2 = yuv2bgra64be_2_c;
2864  *yuv2packedX = yuv2bgra64be_X_c;
2865  } else
2866 #endif /* CONFIG_SWSCALE_ALPHA */
2867  {
2868  *yuv2packed1 = yuv2bgrx64be_1_c;
2869  *yuv2packed2 = yuv2bgrx64be_2_c;
2870  *yuv2packedX = yuv2bgrx64be_X_c;
2871  }
2872  break;
2873  case AV_PIX_FMT_RGB48LE:
2874  *yuv2packed1 = yuv2rgb48le_1_c;
2875  *yuv2packed2 = yuv2rgb48le_2_c;
2876  *yuv2packedX = yuv2rgb48le_X_c;
2877  break;
2878  case AV_PIX_FMT_RGB48BE:
2879  *yuv2packed1 = yuv2rgb48be_1_c;
2880  *yuv2packed2 = yuv2rgb48be_2_c;
2881  *yuv2packedX = yuv2rgb48be_X_c;
2882  break;
2883  case AV_PIX_FMT_BGR48LE:
2884  *yuv2packed1 = yuv2bgr48le_1_c;
2885  *yuv2packed2 = yuv2bgr48le_2_c;
2886  *yuv2packedX = yuv2bgr48le_X_c;
2887  break;
2888  case AV_PIX_FMT_BGR48BE:
2889  *yuv2packed1 = yuv2bgr48be_1_c;
2890  *yuv2packed2 = yuv2bgr48be_2_c;
2891  *yuv2packedX = yuv2bgr48be_X_c;
2892  break;
2893  case AV_PIX_FMT_RGB32:
2894  case AV_PIX_FMT_BGR32:
2895 #if CONFIG_SMALL
2896  *yuv2packed1 = yuv2rgb32_1_c;
2897  *yuv2packed2 = yuv2rgb32_2_c;
2898  *yuv2packedX = yuv2rgb32_X_c;
2899 #else
2900 #if CONFIG_SWSCALE_ALPHA
2901  if (c->needAlpha) {
2902  *yuv2packed1 = yuv2rgba32_1_c;
2903  *yuv2packed2 = yuv2rgba32_2_c;
2904  *yuv2packedX = yuv2rgba32_X_c;
2905  } else
2906 #endif /* CONFIG_SWSCALE_ALPHA */
2907  {
2908  *yuv2packed1 = yuv2rgbx32_1_c;
2909  *yuv2packed2 = yuv2rgbx32_2_c;
2910  *yuv2packedX = yuv2rgbx32_X_c;
2911  }
2912 #endif /* !CONFIG_SMALL */
2913  break;
2914  case AV_PIX_FMT_RGB32_1:
2915  case AV_PIX_FMT_BGR32_1:
2916 #if CONFIG_SMALL
2917  *yuv2packed1 = yuv2rgb32_1_1_c;
2918  *yuv2packed2 = yuv2rgb32_1_2_c;
2919  *yuv2packedX = yuv2rgb32_1_X_c;
2920 #else
2921 #if CONFIG_SWSCALE_ALPHA
2922  if (c->needAlpha) {
2923  *yuv2packed1 = yuv2rgba32_1_1_c;
2924  *yuv2packed2 = yuv2rgba32_1_2_c;
2925  *yuv2packedX = yuv2rgba32_1_X_c;
2926  } else
2927 #endif /* CONFIG_SWSCALE_ALPHA */
2928  {
2929  *yuv2packed1 = yuv2rgbx32_1_1_c;
2930  *yuv2packed2 = yuv2rgbx32_1_2_c;
2931  *yuv2packedX = yuv2rgbx32_1_X_c;
2932  }
2933 #endif /* !CONFIG_SMALL */
2934  break;
2935  case AV_PIX_FMT_RGB24:
2936  *yuv2packed1 = yuv2rgb24_1_c;
2937  *yuv2packed2 = yuv2rgb24_2_c;
2938  *yuv2packedX = yuv2rgb24_X_c;
2939  break;
2940  case AV_PIX_FMT_BGR24:
2941  *yuv2packed1 = yuv2bgr24_1_c;
2942  *yuv2packed2 = yuv2bgr24_2_c;
2943  *yuv2packedX = yuv2bgr24_X_c;
2944  break;
2945  case AV_PIX_FMT_RGB565LE:
2946  case AV_PIX_FMT_RGB565BE:
2947  case AV_PIX_FMT_BGR565LE:
2948  case AV_PIX_FMT_BGR565BE:
2949  *yuv2packed1 = yuv2rgb16_1_c;
2950  *yuv2packed2 = yuv2rgb16_2_c;
2951  *yuv2packedX = yuv2rgb16_X_c;
2952  break;
2953  case AV_PIX_FMT_RGB555LE:
2954  case AV_PIX_FMT_RGB555BE:
2955  case AV_PIX_FMT_BGR555LE:
2956  case AV_PIX_FMT_BGR555BE:
2957  *yuv2packed1 = yuv2rgb15_1_c;
2958  *yuv2packed2 = yuv2rgb15_2_c;
2959  *yuv2packedX = yuv2rgb15_X_c;
2960  break;
2961  case AV_PIX_FMT_RGB444LE:
2962  case AV_PIX_FMT_RGB444BE:
2963  case AV_PIX_FMT_BGR444LE:
2964  case AV_PIX_FMT_BGR444BE:
2965  *yuv2packed1 = yuv2rgb12_1_c;
2966  *yuv2packed2 = yuv2rgb12_2_c;
2967  *yuv2packedX = yuv2rgb12_X_c;
2968  break;
2969  case AV_PIX_FMT_RGB8:
2970  case AV_PIX_FMT_BGR8:
2971  *yuv2packed1 = yuv2rgb8_1_c;
2972  *yuv2packed2 = yuv2rgb8_2_c;
2973  *yuv2packedX = yuv2rgb8_X_c;
2974  break;
2975  case AV_PIX_FMT_RGB4:
2976  case AV_PIX_FMT_BGR4:
2977  *yuv2packed1 = yuv2rgb4_1_c;
2978  *yuv2packed2 = yuv2rgb4_2_c;
2979  *yuv2packedX = yuv2rgb4_X_c;
2980  break;
2981  case AV_PIX_FMT_RGB4_BYTE:
2982  case AV_PIX_FMT_BGR4_BYTE:
2983  *yuv2packed1 = yuv2rgb4b_1_c;
2984  *yuv2packed2 = yuv2rgb4b_2_c;
2985  *yuv2packedX = yuv2rgb4b_X_c;
2986  break;
2987  case AV_PIX_FMT_X2RGB10LE:
2988  case AV_PIX_FMT_X2RGB10BE:
2989  *yuv2packed1 = yuv2x2rgb10_1_c;
2990  *yuv2packed2 = yuv2x2rgb10_2_c;
2991  *yuv2packedX = yuv2x2rgb10_X_c;
2992  break;
2993  }
2994  }
2995  switch (dstFormat) {
2996  case AV_PIX_FMT_MONOWHITE:
2997  *yuv2packed1 = yuv2monowhite_1_c;
2998  *yuv2packed2 = yuv2monowhite_2_c;
2999  *yuv2packedX = yuv2monowhite_X_c;
3000  break;
3001  case AV_PIX_FMT_MONOBLACK:
3002  *yuv2packed1 = yuv2monoblack_1_c;
3003  *yuv2packed2 = yuv2monoblack_2_c;
3004  *yuv2packedX = yuv2monoblack_X_c;
3005  break;
3006  case AV_PIX_FMT_YUYV422:
3007  *yuv2packed1 = yuv2yuyv422_1_c;
3008  *yuv2packed2 = yuv2yuyv422_2_c;
3009  *yuv2packedX = yuv2yuyv422_X_c;
3010  break;
3011  case AV_PIX_FMT_YVYU422:
3012  *yuv2packed1 = yuv2yvyu422_1_c;
3013  *yuv2packed2 = yuv2yvyu422_2_c;
3014  *yuv2packedX = yuv2yvyu422_X_c;
3015  break;
3016  case AV_PIX_FMT_UYVY422:
3017  *yuv2packed1 = yuv2uyvy422_1_c;
3018  *yuv2packed2 = yuv2uyvy422_2_c;
3019  *yuv2packedX = yuv2uyvy422_X_c;
3020  break;
3021  case AV_PIX_FMT_YA8:
3022  *yuv2packed1 = yuv2ya8_1_c;
3023  *yuv2packed2 = yuv2ya8_2_c;
3024  *yuv2packedX = yuv2ya8_X_c;
3025  break;
3026  case AV_PIX_FMT_YA16LE:
3027  *yuv2packed1 = yuv2ya16le_1_c;
3028  *yuv2packed2 = yuv2ya16le_2_c;
3029  *yuv2packedX = yuv2ya16le_X_c;
3030  break;
3031  case AV_PIX_FMT_YA16BE:
3032  *yuv2packed1 = yuv2ya16be_1_c;
3033  *yuv2packed2 = yuv2ya16be_2_c;
3034  *yuv2packedX = yuv2ya16be_X_c;
3035  break;
3036  case AV_PIX_FMT_AYUV64LE:
3037  *yuv2packedX = yuv2ayuv64le_X_c;
3038  break;
3039  }
3040 }
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
IEEE-754 single precision Y, 32bpp, big-endian.
Definition: pixfmt.h:340
planar GBR 4:4:4:4 40bpp, little-endian
Definition: pixfmt.h:291
#define NULL
Definition: coverity.c:32
static av_always_inline void yuv2rgb_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1742
static void yuv2ayuv64le_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **_lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **_chrUSrc, const int16_t **_chrVSrc, int chrFilterSize, const int16_t **_alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2495
av_cold void ff_sws_init_output_funcs(SwsContext *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
Definition: output.c:2543
static void yuv2gbrp16_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
Definition: output.c:2251
static int shift(int a, int b)
Definition: sonic.c:82
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
#define av_clip_uintp2
Definition: common.h:146
#define YUVRGB_TABLE_HEADROOM
#define X_DITHER(u, v)
static void yuv2p010lX_BE_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:526
8 bits gray, 8 bits alpha
Definition: pixfmt.h:143
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:208
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined ...
Definition: pixfmt.h:362
#define A1
Definition: binkdsp.c:31
const char * desc
Definition: libsvtav1.c:79
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:84
const char * g
Definition: vf_curves.c:117
#define accumulate_bit(acc, val)
Definition: output.c:536
int acc
Definition: yuv2rgb.c:555
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:261
const uint8_t ff_dither_2x2_8[][8]
Definition: output.c:46
static av_always_inline void yuv2rgb_full_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2088
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined ...
Definition: pixfmt.h:108
packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:207
static void yuv2p010l1_LE_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:505
uint8_t * table_bU[256+2 *YUVRGB_TABLE_HEADROOM]
#define av_bswap16
Definition: bswap.h:31
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Convenience header that includes libavutil's core.
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
Definition: pixfmt.h:111
planar GBR 4:4:4 36bpp, little-endian
Definition: pixfmt.h:255
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:140
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:388
planar GBR 4:4:4 36bpp, big-endian
Definition: pixfmt.h:254
const uint8_t ff_dither_8x8_220[][8]
Definition: output.c:85
static av_always_inline void yuv2plane1_16_c_template(const int32_t *src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:145
#define r_b
static av_always_inline void yuv2planeX_16_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:159
packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in ...
Definition: pixfmt.h:87
#define output_pixels(pos, Y1, U, Y2, V)
Definition: output.c:746
Macro definitions for various function/variable attributes.
#define b_r
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
Definition: pixfmt.h:106
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:85
const uint8_t ff_dither_8x8_32[][8]
Definition: output.c:60
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:216
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const uint8_t ff_dither_2x2_4[][8]
Definition: output.c:40
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
uint8_t
#define av_cold
Definition: attributes.h:88
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:179
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
const uint8_t ff_dither_4x4_16[][8]
Definition: output.c:52
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:103
static av_always_inline void yuv2ya16_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
Definition: output.c:935
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:205
static void yuv2ya8_2_c(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Definition: output.c:2428
packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined ...
Definition: pixfmt.h:139
#define SWS_FULL_CHR_H_INT
Definition: swscale.h:79
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
Definition: pixfmt.h:105
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
static void yuv2gbrp_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Definition: output.c:2169
static av_always_inline void yuv2422_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:804
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:117
#define av_clip
Definition: common.h:122
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:174
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
static av_always_inline void yuv2rgb_full_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:2043
external API header
enum AVPixelFormat dstFormat
Destination pixel format.
static av_always_inline void yuv2rgb_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1993
uint8_t * table_gU[256+2 *YUVRGB_TABLE_HEADROOM]
static void yuv2p010l1_c(const int16_t *src, uint16_t *dest, int dstW, int big_endian)
Definition: output.c:451
#define A(x)
Definition: vp56_arith.h:28
#define yuv2planeX_float(template, dest_type, BE_LE)
Definition: output.c:292
int * dither_error[4]
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:255
planar GBR 4:4:4 27bpp, big-endian
Definition: pixfmt.h:170
#define A2
Definition: binkdsp.c:32
#define B_R
Definition: output.c:888
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:375
static av_always_inline void yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1359
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:212
#define R
Definition: huffyuvdsp.h:34
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
like NV12, with 16bpp per component, big-endian
Definition: pixfmt.h:301
static void yuv2p010l1_BE_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:512
const uint8_t * d64
Definition: yuv2rgb.c:503
#define B
Definition: huffyuvdsp.h:32
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
static void yuv2nv12cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int chrDstW)
Definition: output.c:407
const char * r
Definition: vf_curves.c:116
static const uint8_t dither[8][8]
Definition: vf_fspp.c:59
static av_always_inline void yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:993
static void yuv2p010lX_LE_c(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:519
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
#define av_clip_int16
Definition: common.h:137
simple assert() macros that are a bit more flexible than ISO C assert().
like NV12, with 16bpp per component, little-endian
Definition: pixfmt.h:300
like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian
Definition: pixfmt.h:285
static av_always_inline void yuv2planeX_10_c_template(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian, int output_bits)
Definition: output.c:335
static av_always_inline void yuv2plane1_float_c_template(const int32_t *src, float *dest, int dstW)
Definition: output.c:214
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t)
Definition: output.c:355
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
Definition: pixfmt.h:92
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:149
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
static av_always_inline void yuv2mono_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:600
static av_always_inline void yuv2422_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:837
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:348
as above, but U and V bytes are swapped
Definition: pixfmt.h:90
planar GBR 4:4:4:4 48bpp, big-endian
Definition: pixfmt.h:287
#define b
Definition: input.c:41
static av_always_inline void yuv2planeX_float_c_template(const int16_t *filter, int filterSize, const int32_t **src, float *dest, int dstW)
Definition: output.c:246
planar GBR 4:4:4:4 40bpp, big-endian
Definition: pixfmt.h:290
#define Y
Definition: boxblur.h:38
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:319
static void yuv2ya8_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Definition: output.c:2459
#define av_clip_uint16
Definition: common.h:134
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:88
static av_always_inline void yuv2mono_1_c_template(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target)
Definition: output.c:661
static av_always_inline void yuv2plane1_float_bswap_c_template(const int32_t *src, uint32_t *dest, int dstW)
Definition: output.c:230
static void yuv2plane1_8_c(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: output.c:397
int32_t
packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
Definition: pixfmt.h:210
int table_gV[256+2 *YUVRGB_TABLE_HEADROOM]
#define AV_PIX_FMT_X2RGB10
Definition: pixfmt.h:452
static void yuv2p010cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
Definition: output.c:481
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
uint8_t * table_rV[256+2 *YUVRGB_TABLE_HEADROOM]
like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian
Definition: pixfmt.h:284
static av_always_inline void yuv2ya16_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **unused_chrUSrc, const int32_t **unused_chrVSrc, int unused_chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
Definition: output.c:897
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:148
packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
Definition: pixfmt.h:110
packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined ...
Definition: pixfmt.h:361
if(ret)
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:288
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:392
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:374
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
#define YUV2PACKED16WRAPPER(name, base, ext, fmt, hasAlpha, eightbytes)
Definition: output.c:1439
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:83
#define av_bswap32
Definition: bswap.h:33
void(* yuv2packedX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
static av_always_inline void yuv2mono_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Definition: output.c:547
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static void yuv2ya8_1_c(SwsContext *c, const int16_t *buf0, const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int y)
Definition: output.c:2402
planar GBR 4:4:4 30bpp, big-endian
Definition: pixfmt.h:172
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:372
void(* yuv2packed1_fn)(struct SwsContext *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
static av_always_inline void yuv2rgb_2_c_template(SwsContext *c, const int16_t *buf[2], const int16_t *ubuf[2], const int16_t *vbuf[2], const int16_t *abuf[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1702
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
planar GBR 4:4:4 42bpp, little-endian
Definition: pixfmt.h:257
const uint8_t ff_dither_8x8_73[][8]
Definition: output.c:72
as above, but U and V bytes are swapped
Definition: pixfmt.h:349
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:318
static av_always_inline void yuv2422_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target)
Definition: output.c:765
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
byte swapping routines
const uint8_t * d32
Definition: yuv2rgb.c:502
planar GBR 4:4:4 42bpp, big-endian
Definition: pixfmt.h:256
static av_always_inline void yuv2rgb_write(uint8_t *_dest, int i, int Y1, int Y2, unsigned A1, unsigned A2, const void *_r, const void *_g, const void *_b, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1522
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined ...
Definition: pixfmt.h:113
#define SH(val, pdst)
static av_always_inline void yuv2rgba64_full_X_c_template(SwsContext *c, const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1243
static void yuv2p010lX_c(const int16_t *filter, int filterSize, const int16_t **src, uint16_t *dest, int dstW, int big_endian)
Definition: output.c:464
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
Definition: intfloat.h:50
static av_always_inline void yuv2planeX_float_bswap_c_template(const int16_t *filter, int filterSize, const int32_t **src, uint32_t *dest, int dstW)
Definition: output.c:266
#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha)
Definition: output.c:1821
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:391
static av_always_inline void yuv2ya16_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *unused_ubuf[2], const int32_t *unused_vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int unused_uvalpha, int y, enum AVPixelFormat target, int unused_hasAlpha, int unused_eightbytes)
Definition: output.c:967
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:107
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
Definition: pixfmt.h:142
SwsDither dither
#define A_DITHER(u, v)
static void yuv2gbrpf32_full_X_c(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrcx, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrcx, const int16_t **chrVSrcx, int chrFilterSize, const int16_t **alpSrcx, uint8_t **dest, int dstW, int y)
Definition: output.c:2326
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb...
Definition: pixfmt.h:76
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb...
Definition: pixfmt.h:75
#define G
Definition: huffyuvdsp.h:33
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
static av_always_inline void yuv2rgb_X_c_template(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y, enum AVPixelFormat target, int hasAlpha)
Definition: output.c:1648
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:321
planar GBR 4:4:4 27bpp, little-endian
Definition: pixfmt.h:171
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:102
#define AV_PIX_FMT_BGR444
Definition: pixfmt.h:393
#define AV_WL16(p, v)
Definition: intreadwrite.h:412
void(* yuv2packed2_fn)(struct SwsContext *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
#define output_pixel(pos, val, bias, signedness)
Definition: output.c:889
const uint8_t * d128
Definition: yuv2rgb.c:554
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:86
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:387
static av_always_inline void yuv2rgb_write_full(SwsContext *c, uint8_t *dest, int i, int Y, int A, int U, int V, int y, enum AVPixelFormat target, int hasAlpha, int err[4])
Definition: output.c:1853
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:320
static av_always_inline void yuv2rgba64_full_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1307
#define av_clip_uint8
Definition: common.h:128
static av_always_inline void yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1140
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:373
static av_always_inline void yuv2rgba64_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target, int hasAlpha, int eightbytes)
Definition: output.c:1075
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:213
void(* yuv2anyX_fn)(struct SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:386
packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:112
#define R_B
Definition: output.c:887
#define av_always_inline
Definition: attributes.h:45
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:175
packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined ...
Definition: pixfmt.h:141
#define yuv2plane1_float(template, dest_type, BE_LE)
Definition: output.c:285
static void yuv2p016cX_c(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest8, int chrDstW)
Definition: output.c:184
int depth
Number of bits in the component.
Definition: pixdesc.h:58
IEEE-754 single precision Y, 32bpp, little-endian.
Definition: pixfmt.h:341
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:217
#define YUV2PACKEDWRAPPER(name, base, ext, fmt)
Definition: output.c:711
int flags
Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
Definition: rpzaenc.c:58
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
int i
Definition: input.c:407
planar GBR 4:4:4 30bpp, little-endian
Definition: pixfmt.h:173
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:206
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
Definition: pixfmt.h:279
#define V
Definition: avdct.c:30