swscale.c
1 /*
2  * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <stdio.h>
23 #include <string.h>
24 
25 #include "libavutil/avassert.h"
26 #include "libavutil/bswap.h"
27 #include "libavutil/common.h"
28 #include "libavutil/cpu.h"
29 #include "libavutil/emms.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/mem_internal.h"
33 #include "libavutil/pixdesc.h"
34 #include "config.h"
35 #include "swscale_internal.h"
36 #include "swscale.h"
37 
38 DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_128)[9][8] = {
39  { 36, 68, 60, 92, 34, 66, 58, 90, },
40  { 100, 4, 124, 28, 98, 2, 122, 26, },
41  { 52, 84, 44, 76, 50, 82, 42, 74, },
42  { 116, 20, 108, 12, 114, 18, 106, 10, },
43  { 32, 64, 56, 88, 38, 70, 62, 94, },
44  { 96, 0, 120, 24, 102, 6, 126, 30, },
45  { 48, 80, 40, 72, 54, 86, 46, 78, },
46  { 112, 16, 104, 8, 118, 22, 110, 14, },
47  { 36, 68, 60, 92, 34, 66, 58, 90, },
48 };
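/*
 * 8x8 ordered-dither matrix of even values below 128; the ninth row simply
 * repeats the first. ff_swscale() below picks one row per output line
 * (c->lumDither8 = ff_dither_8x8_128[dstY & 7], and likewise for chroma)
 * when dithering down from >8 bpc sources.
 */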
49 
50 DECLARE_ALIGNED(8, static const uint8_t, sws_pb_64)[8] = {
51  64, 64, 64, 64, 64, 64, 64, 64
52 };
53 
54 static av_always_inline void fillPlane(uint8_t *plane, int stride, int width,
55  int height, int y, uint8_t val)
56 {
57  int i;
58  uint8_t *ptr = plane + stride * y;
59  for (i = 0; i < height; i++) {
60  memset(ptr, val, width);
61  ptr += stride;
62  }
63 }
64 
65 static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW,
66  const uint8_t *_src, const int16_t *filter,
67  const int32_t *filterPos, int filterSize)
68 {
69  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
70  int i;
71  int32_t *dst = (int32_t *) _dst;
72  const uint16_t *src = (const uint16_t *) _src;
73  int bits = desc->comp[0].depth - 1;
74  int sh = bits - 4;
75 
76  if ((isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8) && desc->comp[0].depth<16) {
77  sh = 9;
78  } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like 16 bpc uint */
79  sh = 16 - 1 - 4;
80  }
81 
82  for (i = 0; i < dstW; i++) {
83  int j;
84  int srcPos = filterPos[i];
85  int val = 0;
86 
87  for (j = 0; j < filterSize; j++) {
88  val += src[srcPos + j] * filter[filterSize * i + j];
89  }
90  // filter=14 bit, input=16 bit, output=30 bit, so >> sh (11 for 16 bpc input) leaves 19 bit
91  dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
92  }
93 }
94 
95 static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW,
96  const uint8_t *_src, const int16_t *filter,
97  const int32_t *filterPos, int filterSize)
98 {
99  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
100  int i;
101  const uint16_t *src = (const uint16_t *) _src;
102  int sh = desc->comp[0].depth - 1;
103 
104  if (sh<15) {
105  sh = isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8 ? 13 : (desc->comp[0].depth - 1);
106  } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like 16 bpc uint */
107  sh = 16 - 1;
108  }
109 
110  for (i = 0; i < dstW; i++) {
111  int j;
112  int srcPos = filterPos[i];
113  int val = 0;
114 
115  for (j = 0; j < filterSize; j++) {
116  val += src[srcPos + j] * filter[filterSize * i + j];
117  }
118  // filter=14 bit, input=16 bit, output=30 bit, so >> sh (15 for 16 bpc input) leaves 15 bit
119  dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
120  }
121 }
122 
123 // bilinear / bicubic scaling
124 static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW,
125  const uint8_t *src, const int16_t *filter,
126  const int32_t *filterPos, int filterSize)
127 {
128  int i;
129  for (i = 0; i < dstW; i++) {
130  int j;
131  int srcPos = filterPos[i];
132  int val = 0;
133  for (j = 0; j < filterSize; j++) {
134  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
135  }
136  dst[i] = FFMIN(val >> 7, (1 << 15) - 1); // the cubic equation does overflow ...
137  }
138 }
139 
140 static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW,
141  const uint8_t *src, const int16_t *filter,
142  const int32_t *filterPos, int filterSize)
143 {
144  int i;
145  int32_t *dst = (int32_t *) _dst;
146  for (i = 0; i < dstW; i++) {
147  int j;
148  int srcPos = filterPos[i];
149  int val = 0;
150  for (j = 0; j < filterSize; j++) {
151  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
152  }
153  dst[i] = FFMIN(val >> 3, (1 << 19) - 1); // the cubic equation does overflow ...
154  }
155 }
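/*
 * Summary sketch of the hScale*_c family above: all four variants perform the
 * same horizontal convolution and differ only in the input depth and the
 * output headroom.  Per output pixel i, conceptually:
 *
 *     val = 0;
 *     for (j = 0; j < filterSize; j++)
 *         val += src[filterPos[i] + j] * filter[i * filterSize + j];
 *     dst[i] = FFMIN(val >> shift, (1 << out_bits) - 1);   // out_bits is 15 or 19
 *
 * The filter coefficients are 14-bit fixed point, so the shift brings the
 * accumulator back into the intermediate precision the vertical scaler expects.
 */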
156 
157 // FIXME all pal and rgb srcFormats could do this conversion as well
158 // FIXME all scalers more complex than bilinear could do half of this transform
159 static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width,
160  uint32_t _coeff, int64_t _offset)
161 {
162  uint16_t coeff = _coeff;
163  int32_t offset = _offset;
164  int i;
165  for (i = 0; i < width; i++) {
166  int U = (dstU[i] * coeff + offset) >> 14;
167  int V = (dstV[i] * coeff + offset) >> 14;
168  dstU[i] = FFMIN(U, (1 << 15) - 1);
169  dstV[i] = FFMIN(V, (1 << 15) - 1);
170  }
171 }
172 
173 static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width,
174  uint32_t _coeff, int64_t _offset)
175 {
176  uint16_t coeff = _coeff;
177  int32_t offset = _offset;
178  int i;
179  for (i = 0; i < width; i++) {
180  dstU[i] = (dstU[i] * coeff + offset) >> 14;
181  dstV[i] = (dstV[i] * coeff + offset) >> 14;
182  }
183 }
184 
185 static void lumRangeToJpeg_c(int16_t *dst, int width,
186  uint32_t _coeff, int64_t _offset)
187 {
188  uint16_t coeff = _coeff;
189  int32_t offset = _offset;
190  int i;
191  for (i = 0; i < width; i++) {
192  int Y = (dst[i] * coeff + offset) >> 14;
193  dst[i] = FFMIN(Y, (1 << 15) - 1);
194  }
195 }
196 
197 static void lumRangeFromJpeg_c(int16_t *dst, int width,
198  uint32_t _coeff, int64_t _offset)
199 {
200  uint16_t coeff = _coeff;
201  int32_t offset = _offset;
202  int i;
203  for (i = 0; i < width; i++)
204  dst[i] = (dst[i] * coeff + offset) >> 14;
205 }
206 
207 static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
208  uint32_t coeff, int64_t offset)
209 {
210  int i;
211  int32_t *dstU = (int32_t *) _dstU;
212  int32_t *dstV = (int32_t *) _dstV;
213  for (i = 0; i < width; i++) {
214  int U = ((int64_t) dstU[i] * coeff + offset) >> 18;
215  int V = ((int64_t) dstV[i] * coeff + offset) >> 18;
216  dstU[i] = FFMIN(U, (1 << 19) - 1);
217  dstV[i] = FFMIN(V, (1 << 19) - 1);
218  }
219 }
220 
221 static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
222  uint32_t coeff, int64_t offset)
223 {
224  int i;
225  int32_t *dstU = (int32_t *) _dstU;
226  int32_t *dstV = (int32_t *) _dstV;
227  for (i = 0; i < width; i++) {
228  dstU[i] = ((int64_t) dstU[i] * coeff + offset) >> 18;
229  dstV[i] = ((int64_t) dstV[i] * coeff + offset) >> 18;
230  }
231 }
232 
233 static void lumRangeToJpeg16_c(int16_t *_dst, int width,
234  uint32_t coeff, int64_t offset)
235 {
236  int i;
237  int32_t *dst = (int32_t *) _dst;
238  for (i = 0; i < width; i++) {
239  int Y = ((int64_t) dst[i] * coeff + offset) >> 18;
240  dst[i] = FFMIN(Y, (1 << 19) - 1);
241  }
242 }
243 
244 static void lumRangeFromJpeg16_c(int16_t *_dst, int width,
245  uint32_t coeff, int64_t offset)
246 {
247  int i;
248  int32_t *dst = (int32_t *) _dst;
249  for (i = 0; i < width; i++)
250  dst[i] = ((int64_t) dst[i] * coeff + offset) >> 18;
251 }
252 
253 
254 #define DEBUG_SWSCALE_BUFFERS 0
255 #define DEBUG_BUFFERS(...) \
256  if (DEBUG_SWSCALE_BUFFERS) \
257  av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
258 
259 int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[],
260  int srcSliceY, int srcSliceH, uint8_t *const dst[],
261  const int dstStride[], int dstSliceY, int dstSliceH)
262 {
263  const int scale_dst = dstSliceY > 0 || dstSliceH < c->opts.dst_h;
264 
265  /* load a few things into local vars to make the code more readable
266  * and faster */
267  const int dstW = c->opts.dst_w;
268  int dstH = c->opts.dst_h;
269 
270  const enum AVPixelFormat dstFormat = c->opts.dst_format;
271  const int flags = c->opts.flags;
272  int32_t *vLumFilterPos = c->vLumFilterPos;
273  int32_t *vChrFilterPos = c->vChrFilterPos;
274 
275  const int vLumFilterSize = c->vLumFilterSize;
276  const int vChrFilterSize = c->vChrFilterSize;
277 
278  yuv2planar1_fn yuv2plane1 = c->yuv2plane1;
279  yuv2planarX_fn yuv2planeX = c->yuv2planeX;
280  yuv2interleavedX_fn yuv2nv12cX = c->yuv2nv12cX;
281  yuv2packed1_fn yuv2packed1 = c->yuv2packed1;
282  yuv2packed2_fn yuv2packed2 = c->yuv2packed2;
283  yuv2packedX_fn yuv2packedX = c->yuv2packedX;
284  yuv2anyX_fn yuv2anyX = c->yuv2anyX;
285  const int chrSrcSliceY = srcSliceY >> c->chrSrcVSubSample;
286  const int chrSrcSliceH = AV_CEIL_RSHIFT(srcSliceH, c->chrSrcVSubSample);
287  int should_dither = isNBPS(c->opts.src_format) ||
288  is16BPS(c->opts.src_format);
289  int lastDstY;
290 
291  /* vars which will change and which we need to store back in the context */
292  int dstY = c->dstY;
293  int lastInLumBuf = c->lastInLumBuf;
294  int lastInChrBuf = c->lastInChrBuf;
295 
296  int lumStart = 0;
297  int lumEnd = c->descIndex[0];
298  int chrStart = lumEnd;
299  int chrEnd = c->descIndex[1];
300  int vStart = chrEnd;
301  int vEnd = c->numDesc;
302  SwsSlice *src_slice = &c->slice[lumStart];
303  SwsSlice *hout_slice = &c->slice[c->numSlice-2];
304  SwsSlice *vout_slice = &c->slice[c->numSlice-1];
305  SwsFilterDescriptor *desc = c->desc;
306 
307  int needAlpha = c->needAlpha;
308 
309  int hasLumHoles = 1;
310  int hasChrHoles = 1;
311 
312  const uint8_t *src2[4];
313  int srcStride2[4];
314 
315  if (isPacked(c->opts.src_format)) {
316  src2[0] =
317  src2[1] =
318  src2[2] =
319  src2[3] = src[0];
320  srcStride2[0] =
321  srcStride2[1] =
322  srcStride2[2] =
323  srcStride2[3] = srcStride[0];
324  } else {
325  memcpy(src2, src, sizeof(src2));
326  memcpy(srcStride2, srcStride, sizeof(srcStride2));
327  }
328 
329  srcStride2[1] *= 1 << c->vChrDrop;
330  srcStride2[2] *= 1 << c->vChrDrop;
331 
332  DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
333  src2[0], srcStride2[0], src2[1], srcStride2[1],
334  src2[2], srcStride2[2], src2[3], srcStride2[3],
335  dst[0], dstStride[0], dst[1], dstStride[1],
336  dst[2], dstStride[2], dst[3], dstStride[3]);
337  DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
338  srcSliceY, srcSliceH, dstY, dstH);
339  DEBUG_BUFFERS("vLumFilterSize: %d vChrFilterSize: %d\n",
340  vLumFilterSize, vChrFilterSize);
341 
342  if (dstStride[0]&15 || dstStride[1]&15 ||
343  dstStride[2]&15 || dstStride[3]&15) {
344  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
345  if (flags & SWS_PRINT_INFO &&
346  !atomic_exchange_explicit(&ctx->stride_unaligned_warned, 1, memory_order_relaxed)) {
348  "Warning: dstStride is not aligned!\n"
349  " ->cannot do aligned memory accesses anymore\n");
350  }
351  }
352 
353 #if ARCH_X86
354  if ( (uintptr_t) dst[0]&15 || (uintptr_t) dst[1]&15 || (uintptr_t) dst[2]&15
355  || (uintptr_t)src2[0]&15 || (uintptr_t)src2[1]&15 || (uintptr_t)src2[2]&15
356  || dstStride[0]&15 || dstStride[1]&15 || dstStride[2]&15 || dstStride[3]&15
357  || srcStride2[0]&15 || srcStride2[1]&15 || srcStride2[2]&15 || srcStride2[3]&15
358  ) {
359  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
360  int cpu_flags = av_get_cpu_flags();
361  if (flags & SWS_PRINT_INFO && HAVE_MMXEXT && (cpu_flags & AV_CPU_FLAG_SSE2) &&
362  !atomic_exchange_explicit(&ctx->stride_unaligned_warned,1, memory_order_relaxed)) {
363  av_log(c, AV_LOG_WARNING, "Warning: data is not aligned! This can lead to a speed loss\n");
364  }
365  }
366 #endif
367 
368  if (scale_dst) {
369  dstY = dstSliceY;
370  dstH = dstY + dstSliceH;
371  lastInLumBuf = -1;
372  lastInChrBuf = -1;
373  } else if (srcSliceY == 0) {
374  /* Note the user might start scaling the picture in the middle so this
375  * will not get executed. This is not really intended but works
376  * currently, so people might do it. */
377  dstY = 0;
378  lastInLumBuf = -1;
379  lastInChrBuf = -1;
380  }
381 
382  if (!should_dither) {
383  c->chrDither8 = c->lumDither8 = sws_pb_64;
384  }
385  lastDstY = dstY;
386 
387  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
388  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, c->use_mmx_vfilter);
389 
390  ff_init_slice_from_src(src_slice, (uint8_t**)src2, srcStride2, c->opts.src_w,
391  srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);
392 
393  ff_init_slice_from_src(vout_slice, (uint8_t**)dst, dstStride, c->opts.dst_w,
394  dstY, dstSliceH, dstY >> c->chrDstVSubSample,
395  AV_CEIL_RSHIFT(dstSliceH, c->chrDstVSubSample), scale_dst);
396  if (srcSliceY == 0) {
397  hout_slice->plane[0].sliceY = lastInLumBuf + 1;
398  hout_slice->plane[1].sliceY = lastInChrBuf + 1;
399  hout_slice->plane[2].sliceY = lastInChrBuf + 1;
400  hout_slice->plane[3].sliceY = lastInLumBuf + 1;
401 
402  hout_slice->plane[0].sliceH =
403  hout_slice->plane[1].sliceH =
404  hout_slice->plane[2].sliceH =
405  hout_slice->plane[3].sliceH = 0;
406  hout_slice->width = dstW;
407  }
408 
409  for (; dstY < dstH; dstY++) {
410  const int chrDstY = dstY >> c->chrDstVSubSample;
411  int use_mmx_vfilter= c->use_mmx_vfilter;
412 
413  // First line needed as input
414  const int firstLumSrcY = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
415  const int firstLumSrcY2 = FFMAX(1 - vLumFilterSize, vLumFilterPos[FFMIN(dstY | ((1 << c->chrDstVSubSample) - 1), c->opts.dst_h - 1)]);
416  // First line needed as input
417  const int firstChrSrcY = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);
418 
419  // Last line needed as input
420  int lastLumSrcY = FFMIN(c->opts.src_h, firstLumSrcY + vLumFilterSize) - 1;
421  int lastLumSrcY2 = FFMIN(c->opts.src_h, firstLumSrcY2 + vLumFilterSize) - 1;
422  int lastChrSrcY = FFMIN(c->chrSrcH, firstChrSrcY + vChrFilterSize) - 1;
423  int enough_lines;
424 
425  int i;
426  int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;
427 
428  // handle holes (FAST_BILINEAR & weird filters)
429  if (firstLumSrcY > lastInLumBuf) {
430 
431  hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
432  if (hasLumHoles) {
433  hout_slice->plane[0].sliceY = firstLumSrcY;
434  hout_slice->plane[3].sliceY = firstLumSrcY;
435  hout_slice->plane[0].sliceH =
436  hout_slice->plane[3].sliceH = 0;
437  }
438 
439  lastInLumBuf = firstLumSrcY - 1;
440  }
441  if (firstChrSrcY > lastInChrBuf) {
442 
443  hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
444  if (hasChrHoles) {
445  hout_slice->plane[1].sliceY = firstChrSrcY;
446  hout_slice->plane[2].sliceY = firstChrSrcY;
447  hout_slice->plane[1].sliceH =
448  hout_slice->plane[2].sliceH = 0;
449  }
450 
451  lastInChrBuf = firstChrSrcY - 1;
452  }
453 
454  DEBUG_BUFFERS("dstY: %d\n", dstY);
455  DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
456  firstLumSrcY, lastLumSrcY, lastInLumBuf);
457  DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
458  firstChrSrcY, lastChrSrcY, lastInChrBuf);
459 
460  // Do we have enough lines in this slice to output the dstY line
461  enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
462  lastChrSrcY < AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample);
463 
464  if (!enough_lines) {
465  lastLumSrcY = srcSliceY + srcSliceH - 1;
466  lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
467  DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
468  lastLumSrcY, lastChrSrcY);
469  }
470 
471  av_assert0((lastLumSrcY - firstLumSrcY + 1) <= hout_slice->plane[0].available_lines);
472  av_assert0((lastChrSrcY - firstChrSrcY + 1) <= hout_slice->plane[1].available_lines);
473 
474 
475  posY = hout_slice->plane[0].sliceY + hout_slice->plane[0].sliceH;
476  if (posY <= lastLumSrcY && !hasLumHoles) {
477  firstPosY = FFMAX(firstLumSrcY, posY);
478  lastPosY = FFMIN(firstLumSrcY + hout_slice->plane[0].available_lines - 1, srcSliceY + srcSliceH - 1);
479  } else {
480  firstPosY = posY;
481  lastPosY = lastLumSrcY;
482  }
483 
484  cPosY = hout_slice->plane[1].sliceY + hout_slice->plane[1].sliceH;
485  if (cPosY <= lastChrSrcY && !hasChrHoles) {
486  firstCPosY = FFMAX(firstChrSrcY, cPosY);
487  lastCPosY = FFMIN(firstChrSrcY + hout_slice->plane[1].available_lines - 1, AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample) - 1);
488  } else {
489  firstCPosY = cPosY;
490  lastCPosY = lastChrSrcY;
491  }
492 
493  ff_rotate_slice(hout_slice, lastPosY, lastCPosY);
494 
495  if (posY < lastLumSrcY + 1) {
496  for (i = lumStart; i < lumEnd; ++i)
497  desc[i].process(c, &desc[i], firstPosY, lastPosY - firstPosY + 1);
498  }
499 
500  lastInLumBuf = lastLumSrcY;
501 
502  if (cPosY < lastChrSrcY + 1) {
503  for (i = chrStart; i < chrEnd; ++i)
504  desc[i].process(c, &desc[i], firstCPosY, lastCPosY - firstCPosY + 1);
505  }
506 
507  lastInChrBuf = lastChrSrcY;
508 
509  if (!enough_lines)
510  break; // we can't output a dstY line so let's try with the next slice
511 
512 #if HAVE_MMX_INLINE
513  ff_updateMMXDitherTables(c, dstY);
514  c->dstW_mmx = c->opts.dst_w;
515 #endif
516  if (should_dither) {
517  c->chrDither8 = ff_dither_8x8_128[chrDstY & 7];
518  c->lumDither8 = ff_dither_8x8_128[dstY & 7];
519  }
520  if (dstY >= c->opts.dst_h - 2) {
521  /* hmm looks like we can't use MMX here without overwriting
522  * this array's tail */
523  ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX,
524  &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX);
525  use_mmx_vfilter= 0;
526  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
527  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, use_mmx_vfilter);
528  }
529 
530  for (i = vStart; i < vEnd; ++i)
531  desc[i].process(c, &desc[i], dstY, 1);
532  }
533  if (isPlanar(dstFormat) && isALPHA(dstFormat) && !needAlpha) {
534  int offset = lastDstY - dstSliceY;
535  int length = dstW;
536  int height = dstY - lastDstY;
537 
538  if (is16BPS(dstFormat) || isNBPS(dstFormat)) {
539  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
540  fillPlane16(dst[3], dstStride[3], length, height, offset,
541  1, desc->comp[3].depth,
542  isBE(dstFormat));
543  } else if (is32BPS(dstFormat)) {
544  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
545  fillPlane32(dst[3], dstStride[3], length, height, offset,
546  1, desc->comp[3].depth,
547  isBE(dstFormat), desc->flags & AV_PIX_FMT_FLAG_FLOAT);
548  } else
549  fillPlane(dst[3], dstStride[3], length, height, offset, 255);
550  }
551 
552 #if HAVE_MMXEXT_INLINE
553  if (av_get_cpu_flags() & AV_CPU_FLAG_MMXEXT)
554  __asm__ volatile ("sfence" ::: "memory");
555 #endif
556  emms_c();
557 
558  /* store changed local vars back in the context */
559  c->dstY = dstY;
560  c->lastInLumBuf = lastInLumBuf;
561  c->lastInChrBuf = lastInChrBuf;
562 
563  return dstY - lastDstY;
564 }
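/*
 * Rough outline of ff_swscale() above: source lines are horizontally scaled
 * on demand into the ring-buffered hout_slice; for each output line dstY the
 * vertical scaler then combines up to vLumFilterSize (vChrFilterSize for
 * chroma) of those buffered lines.  If the current input slice does not yet
 * contain every source line needed for the next output line, the loop breaks
 * and the function returns the number of output lines produced so far
 * (dstY - lastDstY), to be resumed by the next slice.
 */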
565 
566 /*
567  * Solve for coeff and offset:
568  * dst = ((src << src_shift) * coeff + offset) >> (mult_shift + src_shift)
569  *
570  * If SwsInternal->dstBpc is > 14, coeff is uint16_t and offset is int32_t,
571  * otherwise (SwsInternal->dstBpc is <= 14) coeff is uint32_t and offset is
572  * int64_t.
573  */
574 static void solve_range_convert(uint16_t src_min, uint16_t src_max,
575  uint16_t dst_min, uint16_t dst_max,
576  int src_bits, int src_shift, int mult_shift,
577  uint32_t *coeff, int64_t *offset)
578 {
579  uint16_t src_range = src_max - src_min;
580  uint16_t dst_range = dst_max - dst_min;
581  int total_shift = mult_shift + src_shift;
582  *coeff = AV_CEIL_RSHIFT(((uint64_t) dst_range << total_shift) / src_range, src_shift);
583  *offset = ((int64_t) dst_max << total_shift) -
584  ((int64_t) src_max << src_shift) * *coeff +
585  (1U << (mult_shift - 1));
586 }
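/*
 * Worked example (approximate, for illustration): with an 8-bit destination,
 * bit_depth = 8 gives src_bits = 15, src_shift = 7 and mult_shift = 14.  For
 * limited -> full range luma (16..235 -> 0..255) this solves to
 * coeff ~= 19078 and a negative offset chosen so that 235 << 7 maps exactly
 * to 255 << 7; lumRangeToJpeg_c() then applies the result in the 15-bit
 * intermediate domain as (val * coeff + offset) >> 14.
 */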
587 
588 static void init_range_convert_constants(SwsInternal *c)
589 {
590  const int bit_depth = c->dstBpc ? FFMIN(c->dstBpc, 16) : 8;
591  const int src_bits = bit_depth <= 14 ? 15 : 19;
592  const int src_shift = src_bits - bit_depth;
593  const int mult_shift = bit_depth <= 14 ? 14 : 18;
594  const uint16_t mpeg_min = 16U << (bit_depth - 8);
595  const uint16_t mpeg_max_lum = 235U << (bit_depth - 8);
596  const uint16_t mpeg_max_chr = 240U << (bit_depth - 8);
597  const uint16_t jpeg_max = (1U << bit_depth) - 1;
598  uint16_t src_min, src_max_lum, src_max_chr;
599  uint16_t dst_min, dst_max_lum, dst_max_chr;
600  if (c->opts.src_range) {
601  src_min = 0;
602  src_max_lum = jpeg_max;
603  src_max_chr = jpeg_max;
604  dst_min = mpeg_min;
605  dst_max_lum = mpeg_max_lum;
606  dst_max_chr = mpeg_max_chr;
607  } else {
608  src_min = mpeg_min;
609  src_max_lum = mpeg_max_lum;
610  src_max_chr = mpeg_max_chr;
611  dst_min = 0;
612  dst_max_lum = jpeg_max;
613  dst_max_chr = jpeg_max;
614  }
615  solve_range_convert(src_min, src_max_lum, dst_min, dst_max_lum,
616  src_bits, src_shift, mult_shift,
617  &c->lumConvertRange_coeff, &c->lumConvertRange_offset);
618  solve_range_convert(src_min, src_max_chr, dst_min, dst_max_chr,
619  src_bits, src_shift, mult_shift,
620  &c->chrConvertRange_coeff, &c->chrConvertRange_offset);
621 }
622 
623 av_cold void ff_sws_init_range_convert(SwsInternal *c)
624 {
625  c->lumConvertRange = NULL;
626  c->chrConvertRange = NULL;
627  if (c->opts.src_range != c->opts.dst_range && !isAnyRGB(c->opts.dst_format) && c->dstBpc < 32) {
628  init_range_convert_constants(c);
629  if (c->dstBpc <= 14) {
630  if (c->opts.src_range) {
631  c->lumConvertRange = lumRangeFromJpeg_c;
632  c->chrConvertRange = chrRangeFromJpeg_c;
633  } else {
634  c->lumConvertRange = lumRangeToJpeg_c;
635  c->chrConvertRange = chrRangeToJpeg_c;
636  }
637  } else {
638  if (c->opts.src_range) {
639  c->lumConvertRange = lumRangeFromJpeg16_c;
640  c->chrConvertRange = chrRangeFromJpeg16_c;
641  } else {
642  c->lumConvertRange = lumRangeToJpeg16_c;
643  c->chrConvertRange = chrRangeToJpeg16_c;
644  }
645  }
646 
647 #if ARCH_AARCH64
648  ff_sws_init_range_convert_aarch64(c);
649 #elif ARCH_LOONGARCH64
650  ff_sws_init_range_convert_loongarch(c);
651 #elif ARCH_RISCV
652  ff_sws_init_range_convert_riscv(c);
653 #elif ARCH_X86
654  ff_sws_init_range_convert_x86(c);
655 #endif
656  }
657 }
658 
659 static av_cold void sws_init_swscale(SwsInternal *c)
660 {
661  enum AVPixelFormat srcFormat = c->opts.src_format;
662 
663  ff_sws_init_output_funcs(c, &c->yuv2plane1, &c->yuv2planeX,
664  &c->yuv2nv12cX, &c->yuv2packed1,
665  &c->yuv2packed2, &c->yuv2packedX, &c->yuv2anyX);
666 
667  ff_sws_init_input_funcs(c, &c->lumToYV12, &c->alpToYV12, &c->chrToYV12,
668  &c->readLumPlanar, &c->readAlpPlanar, &c->readChrPlanar);
669 
670  if (c->srcBpc == 8) {
671  if (c->dstBpc <= 14) {
672  c->hyScale = c->hcScale = hScale8To15_c;
673  if (c->opts.flags & SWS_FAST_BILINEAR) {
674  c->hyscale_fast = ff_hyscale_fast_c;
675  c->hcscale_fast = ff_hcscale_fast_c;
676  }
677  } else {
678  c->hyScale = c->hcScale = hScale8To19_c;
679  }
680  } else {
681  c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_c
682  : hScale16To15_c;
683  }
684 
685  ff_sws_init_range_convert(c);
686 
687  if (!(isGray(srcFormat) || isGray(c->opts.dst_format) ||
688  srcFormat == AV_PIX_FMT_MONOBLACK || srcFormat == AV_PIX_FMT_MONOWHITE))
689  c->needs_hcscale = 1;
690 }
691 
692 av_cold void ff_sws_init_scale(SwsInternal *c)
693 {
694  sws_init_swscale(c);
695 
696 #if ARCH_PPC
697  ff_sws_init_swscale_ppc(c);
698 #elif ARCH_X86
699  ff_sws_init_swscale_x86(c);
700 #elif ARCH_AARCH64
701  ff_sws_init_swscale_aarch64(c);
702 #elif ARCH_ARM
703  ff_sws_init_swscale_arm(c);
704 #elif ARCH_LOONGARCH64
705  ff_sws_init_swscale_loongarch(c);
706 #elif ARCH_RISCV
707  ff_sws_init_swscale_riscv(c);
708 #endif
709 }
710 
711 static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
712 {
713  if (!isALPHA(format))
714  src[3] = NULL;
715  if (!isPlanar(format)) {
716  src[3] = src[2] = NULL;
717 
718  if (!usePal(format))
719  src[1] = NULL;
720  }
721 }
722 
723 static int check_image_pointers(const uint8_t * const data[4], enum AVPixelFormat pix_fmt,
724  const int linesizes[4])
725 {
726  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
727  int i;
728 
729  av_assert2(desc);
730 
731  for (i = 0; i < 4; i++) {
732  int plane = desc->comp[i].plane;
733  if (!data[plane] || !linesizes[plane])
734  return 0;
735  }
736 
737  return 1;
738 }
739 
740 void ff_xyz12Torgb48(const SwsInternal *c, uint8_t *dst, int dst_stride,
741  const uint8_t *src, int src_stride, int w, int h)
742 {
743  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
744 
745  for (int yp = 0; yp < h; yp++) {
746  const uint16_t *src16 = (const uint16_t *) src;
747  uint16_t *dst16 = (uint16_t *) dst;
748 
749  for (int xp = 0; xp < 3 * w; xp += 3) {
750  int x, y, z, r, g, b;
751 
752  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
753  x = AV_RB16(src16 + xp + 0);
754  y = AV_RB16(src16 + xp + 1);
755  z = AV_RB16(src16 + xp + 2);
756  } else {
757  x = AV_RL16(src16 + xp + 0);
758  y = AV_RL16(src16 + xp + 1);
759  z = AV_RL16(src16 + xp + 2);
760  }
761 
762  x = c->xyzgamma[x >> 4];
763  y = c->xyzgamma[y >> 4];
764  z = c->xyzgamma[z >> 4];
765 
766  // convert from XYZlinear to sRGBlinear
767  r = c->xyz2rgb_matrix[0][0] * x +
768  c->xyz2rgb_matrix[0][1] * y +
769  c->xyz2rgb_matrix[0][2] * z >> 12;
770  g = c->xyz2rgb_matrix[1][0] * x +
771  c->xyz2rgb_matrix[1][1] * y +
772  c->xyz2rgb_matrix[1][2] * z >> 12;
773  b = c->xyz2rgb_matrix[2][0] * x +
774  c->xyz2rgb_matrix[2][1] * y +
775  c->xyz2rgb_matrix[2][2] * z >> 12;
776 
777  // limit values to 16-bit depth
778  r = av_clip_uint16(r);
779  g = av_clip_uint16(g);
780  b = av_clip_uint16(b);
781 
782  // convert from sRGBlinear to RGB and scale from 12bit to 16bit
783  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
784  AV_WB16(dst16 + xp + 0, c->rgbgamma[r] << 4);
785  AV_WB16(dst16 + xp + 1, c->rgbgamma[g] << 4);
786  AV_WB16(dst16 + xp + 2, c->rgbgamma[b] << 4);
787  } else {
788  AV_WL16(dst16 + xp + 0, c->rgbgamma[r] << 4);
789  AV_WL16(dst16 + xp + 1, c->rgbgamma[g] << 4);
790  AV_WL16(dst16 + xp + 2, c->rgbgamma[b] << 4);
791  }
792  }
793 
794  src += src_stride;
795  dst += dst_stride;
796  }
797 }
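/*
 * Fixed-point layout used above (as implied by the shifts): XYZ12 samples are
 * stored left-justified in 16-bit words, so ">> 4" recovers the 12-bit index
 * into the xyzgamma LUT; the 3x3 matrix multiply accumulates with what appear
 * to be 12 fractional bits (hence ">> 12"); and the final "<< 4" widens the
 * 12-bit gamma-encoded result back to 16 bits for the RGB48 output.
 */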
798 
799 void ff_rgb48Toxyz12(const SwsInternal *c, uint8_t *dst, int dst_stride,
800  const uint8_t *src, int src_stride, int w, int h)
801 {
802  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.dst_format);
803 
804  for (int yp = 0; yp < h; yp++) {
805  uint16_t *src16 = (uint16_t *) src;
806  uint16_t *dst16 = (uint16_t *) dst;
807 
808  for (int xp = 0; xp < 3 * w; xp += 3) {
809  int x, y, z, r, g, b;
810 
811  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
812  r = AV_RB16(src16 + xp + 0);
813  g = AV_RB16(src16 + xp + 1);
814  b = AV_RB16(src16 + xp + 2);
815  } else {
816  r = AV_RL16(src16 + xp + 0);
817  g = AV_RL16(src16 + xp + 1);
818  b = AV_RL16(src16 + xp + 2);
819  }
820 
821  r = c->rgbgammainv[r>>4];
822  g = c->rgbgammainv[g>>4];
823  b = c->rgbgammainv[b>>4];
824 
825  // convert from sRGBlinear to XYZlinear
826  x = c->rgb2xyz_matrix[0][0] * r +
827  c->rgb2xyz_matrix[0][1] * g +
828  c->rgb2xyz_matrix[0][2] * b >> 12;
829  y = c->rgb2xyz_matrix[1][0] * r +
830  c->rgb2xyz_matrix[1][1] * g +
831  c->rgb2xyz_matrix[1][2] * b >> 12;
832  z = c->rgb2xyz_matrix[2][0] * r +
833  c->rgb2xyz_matrix[2][1] * g +
834  c->rgb2xyz_matrix[2][2] * b >> 12;
835 
836  // limit values to 16-bit depth
837  x = av_clip_uint16(x);
838  y = av_clip_uint16(y);
839  z = av_clip_uint16(z);
840 
841  // convert from XYZlinear to X'Y'Z' and scale from 12bit to 16bit
842  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
843  AV_WB16(dst16 + xp + 0, c->xyzgammainv[x] << 4);
844  AV_WB16(dst16 + xp + 1, c->xyzgammainv[y] << 4);
845  AV_WB16(dst16 + xp + 2, c->xyzgammainv[z] << 4);
846  } else {
847  AV_WL16(dst16 + xp + 0, c->xyzgammainv[x] << 4);
848  AV_WL16(dst16 + xp + 1, c->xyzgammainv[y] << 4);
849  AV_WL16(dst16 + xp + 2, c->xyzgammainv[z] << 4);
850  }
851  }
852 
853  src += src_stride;
854  dst += dst_stride;
855  }
856 }
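/*
 * This is the inverse of ff_xyz12Torgb48() above: the same 12-bit LUT and
 * shift layout, but using rgbgammainv, rgb2xyz_matrix and xyzgammainv to go
 * from RGB48 back to left-justified XYZ12.
 */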
857 
858 void ff_update_palette(SwsInternal *c, const uint32_t *pal)
859 {
860  for (int i = 0; i < 256; i++) {
861  int r, g, b, y, u, v, a = 0xff;
862  if (c->opts.src_format == AV_PIX_FMT_PAL8) {
863  uint32_t p = pal[i];
864  a = (p >> 24) & 0xFF;
865  r = (p >> 16) & 0xFF;
866  g = (p >> 8) & 0xFF;
867  b = p & 0xFF;
868  } else if (c->opts.src_format == AV_PIX_FMT_RGB8) {
869  r = ( i >> 5 ) * 36;
870  g = ((i >> 2) & 7) * 36;
871  b = ( i & 3) * 85;
872  } else if (c->opts.src_format == AV_PIX_FMT_BGR8) {
873  b = ( i >> 6 ) * 85;
874  g = ((i >> 3) & 7) * 36;
875  r = ( i & 7) * 36;
876  } else if (c->opts.src_format == AV_PIX_FMT_RGB4_BYTE) {
877  r = ( i >> 3 ) * 255;
878  g = ((i >> 1) & 3) * 85;
879  b = ( i & 1) * 255;
880  } else if (c->opts.src_format == AV_PIX_FMT_GRAY8 || c->opts.src_format == AV_PIX_FMT_GRAY8A) {
881  r = g = b = i;
882  } else {
883  av_assert1(c->opts.src_format == AV_PIX_FMT_BGR4_BYTE);
884  b = ( i >> 3 ) * 255;
885  g = ((i >> 1) & 3) * 85;
886  r = ( i & 1) * 255;
887  }
888 #define RGB2YUV_SHIFT 15
889 #define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
890 #define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
891 #define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
892 #define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
893 #define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
894 #define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
895 #define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
896 #define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
897 #define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
898 
899  y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
900  u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
901  v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
902  c->pal_yuv[i]= y + (u<<8) + (v<<16) + ((unsigned)a<<24);
903 
904  switch (c->opts.dst_format) {
905  case AV_PIX_FMT_BGR32:
906 #if !HAVE_BIGENDIAN
907  case AV_PIX_FMT_RGB24:
908 #endif
909  c->pal_rgb[i]= r + (g<<8) + (b<<16) + ((unsigned)a<<24);
910  break;
911  case AV_PIX_FMT_BGR32_1:
912 #if HAVE_BIGENDIAN
913  case AV_PIX_FMT_BGR24:
914 #endif
915  c->pal_rgb[i]= a + (r<<8) + (g<<16) + ((unsigned)b<<24);
916  break;
917  case AV_PIX_FMT_RGB32_1:
918 #if HAVE_BIGENDIAN
919  case AV_PIX_FMT_RGB24:
920 #endif
921  c->pal_rgb[i]= a + (b<<8) + (g<<16) + ((unsigned)r<<24);
922  break;
923  case AV_PIX_FMT_GBRP:
924  case AV_PIX_FMT_GBRAP:
925 #if HAVE_BIGENDIAN
926  c->pal_rgb[i]= a + (r<<8) + (b<<16) + ((unsigned)g<<24);
927 #else
928  c->pal_rgb[i]= g + (b<<8) + (r<<16) + ((unsigned)a<<24);
929 #endif
930  break;
931  case AV_PIX_FMT_RGB32:
932 #if !HAVE_BIGENDIAN
933  case AV_PIX_FMT_BGR24:
934 #endif
935  default:
936  c->pal_rgb[i]= b + (g<<8) + (r<<16) + ((unsigned)a<<24);
937  }
938  }
939 }
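/*
 * Sanity check for the constants above: the rounding terms fold the black
 * level / chroma offsets together with +0.5, since 33 << (RGB2YUV_SHIFT - 1)
 * equals 16.5 << RGB2YUV_SHIFT and 257 << (RGB2YUV_SHIFT - 1) equals
 * 128.5 << RGB2YUV_SHIFT.  For r = g = b = 255 the luma works out to 235,
 * i.e. full-range white lands on limited-range white, which is exactly what
 * the 219/255 scaling in the macros intends.
 */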
940 
941 static int scale_internal(SwsContext *sws,
942  const uint8_t * const srcSlice[], const int srcStride[],
943  int srcSliceY, int srcSliceH,
944  uint8_t *const dstSlice[], const int dstStride[],
945  int dstSliceY, int dstSliceH);
946 
947 static int scale_gamma(SwsInternal *c,
948  const uint8_t * const srcSlice[], const int srcStride[],
949  int srcSliceY, int srcSliceH,
950  uint8_t * const dstSlice[], const int dstStride[],
951  int dstSliceY, int dstSliceH)
952 {
953  int ret = scale_internal(c->cascaded_context[0],
954  srcSlice, srcStride, srcSliceY, srcSliceH,
955  c->cascaded_tmp[0], c->cascaded_tmpStride[0], 0, c->opts.src_h);
956 
957  if (ret < 0)
958  return ret;
959 
960  if (c->cascaded_context[2])
961  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
962  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
963  c->cascaded_tmp[1], c->cascaded_tmpStride[1], 0, c->opts.dst_h);
964  else
965  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
966  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
967  dstSlice, dstStride, dstSliceY, dstSliceH);
968 
969  if (ret < 0)
970  return ret;
971 
972  if (c->cascaded_context[2]) {
973  const int dstY1 = sws_internal(c->cascaded_context[1])->dstY;
974  ret = scale_internal(c->cascaded_context[2], (const uint8_t * const *)c->cascaded_tmp[1],
975  c->cascaded_tmpStride[1], dstY1 - ret, dstY1,
976  dstSlice, dstStride, dstSliceY, dstSliceH);
977  }
978  return ret;
979 }
980 
981 static int scale_cascaded(SwsInternal *c,
982  const uint8_t * const srcSlice[], const int srcStride[],
983  int srcSliceY, int srcSliceH,
984  uint8_t * const dstSlice[], const int dstStride[],
985  int dstSliceY, int dstSliceH)
986 {
987  const int dstH0 = c->cascaded_context[0]->dst_h;
988  int ret = scale_internal(c->cascaded_context[0],
989  srcSlice, srcStride, srcSliceY, srcSliceH,
990  c->cascaded_tmp[0], c->cascaded_tmpStride[0],
991  0, dstH0);
992  if (ret < 0)
993  return ret;
994  ret = scale_internal(c->cascaded_context[1],
995  (const uint8_t * const * )c->cascaded_tmp[0], c->cascaded_tmpStride[0],
996  0, dstH0, dstSlice, dstStride, dstSliceY, dstSliceH);
997  return ret;
998 }
999 
1000 static int scale_internal(SwsContext *sws,
1001  const uint8_t * const srcSlice[], const int srcStride[],
1002  int srcSliceY, int srcSliceH,
1003  uint8_t *const dstSlice[], const int dstStride[],
1004  int dstSliceY, int dstSliceH)
1005 {
1006  SwsInternal *c = sws_internal(sws);
1007  const int scale_dst = dstSliceY > 0 || dstSliceH < sws->dst_h;
1008  const int frame_start = scale_dst || !c->sliceDir;
1009  int i, ret;
1010  const uint8_t *src2[4];
1011  uint8_t *dst2[4];
1012  int macro_height_src = isBayer(sws->src_format) ? 2 : (1 << c->chrSrcVSubSample);
1013  int macro_height_dst = isBayer(sws->dst_format) ? 2 : (1 << c->chrDstVSubSample);
1014  // copy strides, so they can safely be modified
1015  int srcStride2[4];
1016  int dstStride2[4];
1017  int srcSliceY_internal = srcSliceY;
1018 
1019  if (!srcStride || !dstStride || !dstSlice || !srcSlice) {
1020  av_log(c, AV_LOG_ERROR, "One of the input parameters to sws_scale() is NULL, please check the calling code\n");
1021  return AVERROR(EINVAL);
1022  }
1023 
1024  if ((srcSliceY & (macro_height_src - 1)) ||
1025  ((srcSliceH & (macro_height_src - 1)) && srcSliceY + srcSliceH != sws->src_h) ||
1026  srcSliceY + srcSliceH > sws->src_h ||
1027  (isBayer(sws->src_format) && srcSliceH <= 1)) {
1028  av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", srcSliceY, srcSliceH);
1029  return AVERROR(EINVAL);
1030  }
1031 
1032  if ((dstSliceY & (macro_height_dst - 1)) ||
1033  ((dstSliceH & (macro_height_dst - 1)) && dstSliceY + dstSliceH != sws->dst_h) ||
1034  dstSliceY + dstSliceH > sws->dst_h) {
1035  av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", dstSliceY, dstSliceH);
1036  return AVERROR(EINVAL);
1037  }
1038 
1039  if (!check_image_pointers(srcSlice, sws->src_format, srcStride)) {
1040  av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
1041  return AVERROR(EINVAL);
1042  }
1043  if (!check_image_pointers((const uint8_t* const*)dstSlice, sws->dst_format, dstStride)) {
1044  av_log(c, AV_LOG_ERROR, "bad dst image pointers\n");
1045  return AVERROR(EINVAL);
1046  }
1047 
1048  // do not mess up sliceDir if we have a "trailing" 0-size slice
1049  if (srcSliceH == 0)
1050  return 0;
1051 
1052  if (sws->gamma_flag && c->cascaded_context[0])
1053  return scale_gamma(c, srcSlice, srcStride, srcSliceY, srcSliceH,
1054  dstSlice, dstStride, dstSliceY, dstSliceH);
1055 
1056  if (c->cascaded_context[0] && srcSliceY == 0 && srcSliceH == c->cascaded_context[0]->src_h)
1057  return scale_cascaded(c, srcSlice, srcStride, srcSliceY, srcSliceH,
1058  dstSlice, dstStride, dstSliceY, dstSliceH);
1059 
1060  if (!srcSliceY && (sws->flags & SWS_BITEXACT) && sws->dither == SWS_DITHER_ED && c->dither_error[0])
1061  for (i = 0; i < 4; i++)
1062  memset(c->dither_error[i], 0, sizeof(c->dither_error[0][0]) * (sws->dst_w+2));
1063 
1064  if (usePal(sws->src_format))
1065  ff_update_palette(c, (const uint32_t *)srcSlice[1]);
1066 
1067  memcpy(src2, srcSlice, sizeof(src2));
1068  memcpy(dst2, dstSlice, sizeof(dst2));
1069  memcpy(srcStride2, srcStride, sizeof(srcStride2));
1070  memcpy(dstStride2, dstStride, sizeof(dstStride2));
1071 
1072  if (frame_start && !scale_dst) {
1073  if (srcSliceY != 0 && srcSliceY + srcSliceH != sws->src_h) {
1074  av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
1075  return AVERROR(EINVAL);
1076  }
1077 
1078  c->sliceDir = (srcSliceY == 0) ? 1 : -1;
1079  } else if (scale_dst)
1080  c->sliceDir = 1;
1081 
1082  if (c->src0Alpha && !c->dst0Alpha && isALPHA(sws->dst_format)) {
1083  uint8_t *base;
1084  int x,y;
1085 
1086  av_fast_malloc(&c->rgb0_scratch, &c->rgb0_scratch_allocated,
1087  FFABS(srcStride[0]) * srcSliceH + 32);
1088  if (!c->rgb0_scratch)
1089  return AVERROR(ENOMEM);
1090 
1091  base = srcStride[0] < 0 ? c->rgb0_scratch - srcStride[0] * (srcSliceH-1) :
1092  c->rgb0_scratch;
1093  for (y=0; y<srcSliceH; y++){
1094  memcpy(base + srcStride[0]*y, src2[0] + srcStride[0]*y, 4*sws->src_w);
1095  for (x=c->src0Alpha-1; x<4*sws->src_w; x+=4) {
1096  base[ srcStride[0]*y + x] = 0xFF;
1097  }
1098  }
1099  src2[0] = base;
1100  }
1101 
1102  if (c->srcXYZ && !(c->dstXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
1103  uint8_t *base;
1104 
1105  av_fast_malloc(&c->xyz_scratch, &c->xyz_scratch_allocated,
1106  FFABS(srcStride[0]) * srcSliceH + 32);
1107  if (!c->xyz_scratch)
1108  return AVERROR(ENOMEM);
1109 
1110  base = srcStride[0] < 0 ? c->xyz_scratch - srcStride[0] * (srcSliceH-1) :
1111  c->xyz_scratch;
1112 
1113  ff_xyz12Torgb48(c, base, srcStride[0], src2[0], srcStride[0], sws->src_w, srcSliceH);
1114  src2[0] = base;
1115  }
1116 
1117  if (c->sliceDir != 1) {
1118  // slices go from bottom to top => we flip the image internally
1119  for (i=0; i<4; i++) {
1120  srcStride2[i] *= -1;
1121  dstStride2[i] *= -1;
1122  }
1123 
1124  src2[0] += (srcSliceH - 1) * srcStride[0];
1125  if (!usePal(sws->src_format))
1126  src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
1127  src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
1128  src2[3] += (srcSliceH - 1) * srcStride[3];
1129  dst2[0] += ( sws->dst_h - 1) * dstStride[0];
1130  dst2[1] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[1];
1131  dst2[2] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[2];
1132  dst2[3] += ( sws->dst_h - 1) * dstStride[3];
1133 
1134  srcSliceY_internal = sws->src_h-srcSliceY-srcSliceH;
1135  }
1136  reset_ptr((void*)src2, sws->src_format);
1137  reset_ptr((void*)dst2, sws->dst_format);
1138 
1139  if (c->convert_unscaled) {
1140  int offset = srcSliceY_internal;
1141  int slice_h = srcSliceH;
1142 
1143  // for dst slice scaling, offset the pointers to match the unscaled API
1144  if (scale_dst) {
1145  av_assert0(offset == 0);
1146  for (i = 0; i < 4 && src2[i]; i++) {
1147  if (!src2[i] || (i > 0 && usePal(sws->src_format)))
1148  break;
1149  src2[i] += (dstSliceY >> ((i == 1 || i == 2) ? c->chrSrcVSubSample : 0)) * srcStride2[i];
1150  }
1151 
1152  for (i = 0; i < 4 && dst2[i]; i++) {
1153  if (!dst2[i] || (i > 0 && usePal(sws->dst_format)))
1154  break;
1155  dst2[i] -= (dstSliceY >> ((i == 1 || i == 2) ? c->chrDstVSubSample : 0)) * dstStride2[i];
1156  }
1157  offset = dstSliceY;
1158  slice_h = dstSliceH;
1159  }
1160 
1161  ret = c->convert_unscaled(c, src2, srcStride2, offset, slice_h,
1162  dst2, dstStride2);
1163  if (scale_dst)
1164  dst2[0] += dstSliceY * dstStride2[0];
1165  } else {
1166  ret = ff_swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH,
1167  dst2, dstStride2, dstSliceY, dstSliceH);
1168  }
1169 
1170  if (c->dstXYZ && !(c->srcXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
1171  uint8_t *dst;
1172 
1173  if (scale_dst) {
1174  dst = dst2[0];
1175  } else {
1176  int dstY = c->dstY ? c->dstY : srcSliceY + srcSliceH;
1177 
1178  av_assert0(dstY >= ret);
1179  av_assert0(ret >= 0);
1180  av_assert0(sws->dst_h >= dstY);
1181  dst = dst2[0] + (dstY - ret) * dstStride2[0];
1182  }
1183 
1184  /* replace on the same data */
1185  ff_rgb48Toxyz12(c, dst, dstStride2[0], dst, dstStride2[0], sws->dst_w, ret);
1186  }
1187 
1188  /* reset slice direction at end of frame */
1189  if ((srcSliceY_internal + srcSliceH == sws->src_h) || scale_dst)
1190  c->sliceDir = 0;
1191 
1192  return ret;
1193 }
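/*
 * scale_internal() above is the common entry point for all of the public
 * scaling calls: it validates the slice geometry, prepares palette / alpha /
 * XYZ input as needed, flips bottom-up slices, then dispatches either to the
 * unscaled converter or to ff_swscale(), and finally converts the output back
 * to XYZ when the destination requires it.
 */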
1194 
1195 void sws_frame_end(SwsContext *sws)
1196 {
1197  SwsInternal *c = sws_internal(sws);
1198  av_frame_unref(c->frame_src);
1199  av_frame_unref(c->frame_dst);
1200  c->src_ranges.nb_ranges = 0;
1201 }
1202 
1203 int sws_frame_start(SwsContext *sws, AVFrame *dst, const AVFrame *src)
1204 {
1205  SwsInternal *c = sws_internal(sws);
1206  int ret, allocated = 0;
1207 
1208  ret = av_frame_ref(c->frame_src, src);
1209  if (ret < 0)
1210  return ret;
1211 
1212  if (!dst->buf[0]) {
1213  dst->width = sws->dst_w;
1214  dst->height = sws->dst_h;
1215  dst->format = sws->dst_format;
1216 
1217  ret = av_frame_get_buffer(dst, 0);
1218  if (ret < 0)
1219  return ret;
1220  allocated = 1;
1221  }
1222 
1223  ret = av_frame_ref(c->frame_dst, dst);
1224  if (ret < 0) {
1225  if (allocated)
1226  av_frame_unref(dst);
1227 
1228  return ret;
1229  }
1230 
1231  return 0;
1232 }
1233 
1234 int sws_send_slice(SwsContext *sws, unsigned int slice_start,
1235  unsigned int slice_height)
1236 {
1237  SwsInternal *c = sws_internal(sws);
1238  int ret;
1239 
1240  ret = ff_range_add(&c->src_ranges, slice_start, slice_height);
1241  if (ret < 0)
1242  return ret;
1243 
1244  return 0;
1245 }
1246 
1247 unsigned int sws_receive_slice_alignment(const SwsContext *sws)
1248 {
1249  const SwsInternal *c = sws_internal(sws);
1250  if (c->slice_ctx)
1251  return sws_internal(c->slice_ctx[0])->dst_slice_align;
1252 
1253  return c->dst_slice_align;
1254 }
1255 
1256 int sws_receive_slice(SwsContext *sws, unsigned int slice_start,
1257  unsigned int slice_height)
1258 {
1259  SwsInternal *c = sws_internal(sws);
1260  unsigned int align = sws_receive_slice_alignment(sws);
1261  uint8_t *dst[4];
1262 
1263  /* wait until complete input has been received */
1264  if (!(c->src_ranges.nb_ranges == 1 &&
1265  c->src_ranges.ranges[0].start == 0 &&
1266  c->src_ranges.ranges[0].len == sws->src_h))
1267  return AVERROR(EAGAIN);
1268 
1269  if ((slice_start > 0 || slice_height < sws->dst_h) &&
1270  (slice_start % align || slice_height % align)) {
1272  "Incorrectly aligned output: %u/%u not multiples of %u\n",
1273  slice_start, slice_height, align);
1274  return AVERROR(EINVAL);
1275  }
1276 
1277  if (c->slicethread) {
1278  int nb_jobs = c->nb_slice_ctx;
1279  int ret = 0;
1280 
1281  if (c->slice_ctx[0]->dither == SWS_DITHER_ED)
1282  nb_jobs = 1;
1283 
1284  c->dst_slice_start = slice_start;
1285  c->dst_slice_height = slice_height;
1286 
1287  avpriv_slicethread_execute(c->slicethread, nb_jobs, 0);
1288 
1289  for (int i = 0; i < c->nb_slice_ctx; i++) {
1290  if (c->slice_err[i] < 0) {
1291  ret = c->slice_err[i];
1292  break;
1293  }
1294  }
1295 
1296  memset(c->slice_err, 0, c->nb_slice_ctx * sizeof(*c->slice_err));
1297 
1298  return ret;
1299  }
1300 
1301  for (int i = 0; i < FF_ARRAY_ELEMS(dst); i++) {
1302  ptrdiff_t offset = c->frame_dst->linesize[i] * (ptrdiff_t)(slice_start >> c->chrDstVSubSample);
1303  dst[i] = FF_PTR_ADD(c->frame_dst->data[i], offset);
1304  }
1305 
1306  return scale_internal(sws, (const uint8_t * const *)c->frame_src->data,
1307  c->frame_src->linesize, 0, sws->src_h,
1308  dst, c->frame_dst->linesize, slice_start, slice_height);
1309 }
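/*
 * Typical calling sequence for the frame/slice API implemented here (sketch,
 * error handling omitted; this mirrors what sws_scale_frame() does below on
 * its legacy path):
 *
 *     sws_frame_start(sws, dst, src);
 *     sws_send_slice(sws, 0, src->height);
 *     sws_receive_slice(sws, 0, dst->height);
 *     sws_frame_end(sws);
 */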
1310 
1311 static void get_frame_pointers(const AVFrame *frame, uint8_t *data[4],
1312  int linesize[4], int field)
1313 {
1314  for (int i = 0; i < 4; i++) {
1315  data[i] = frame->data[i];
1316  linesize[i] = frame->linesize[i];
1317  }
1318 
1319  if (!(frame->flags & AV_FRAME_FLAG_INTERLACED)) {
1320  av_assert1(!field);
1321  return;
1322  }
1323 
1324  if (field == FIELD_BOTTOM) {
1325  /* Odd rows, offset by one line */
1326  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1327  for (int i = 0; i < 4; i++) {
1328  data[i] += linesize[i];
1329  if (desc->flags & AV_PIX_FMT_FLAG_PAL)
1330  break;
1331  }
1332  }
1333 
1334  /* Take only every second line */
1335  for (int i = 0; i < 4; i++)
1336  linesize[i] <<= 1;
1337 }
1338 
1339 /* Subset of av_frame_ref() that only references (video) data buffers */
1340 static int frame_ref(AVFrame *dst, const AVFrame *src)
1341 {
1342  /* ref the buffers */
1343  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
1344  if (!src->buf[i])
1345  continue;
1346  dst->buf[i] = av_buffer_ref(src->buf[i]);
1347  if (!dst->buf[i])
1348  return AVERROR(ENOMEM);
1349  }
1350 
1351  memcpy(dst->data, src->data, sizeof(src->data));
1352  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
1353  return 0;
1354 }
1355 
1356 int sws_scale_frame(SwsContext *sws, AVFrame *dst, const AVFrame *src)
1357 {
1358  int ret;
1359  SwsInternal *c = sws_internal(sws);
1360  if (!src || !dst)
1361  return AVERROR(EINVAL);
1362 
1363  if (c->frame_src) {
1364  /* Context has been initialized with explicit values, fall back to
1365  * legacy API */
1366  ret = sws_frame_start(sws, dst, src);
1367  if (ret < 0)
1368  return ret;
1369 
1370  ret = sws_send_slice(sws, 0, src->height);
1371  if (ret >= 0)
1372  ret = sws_receive_slice(sws, 0, dst->height);
1373 
1374  sws_frame_end(sws);
1375 
1376  return ret;
1377  }
1378 
1379  ret = sws_frame_setup(sws, dst, src);
1380  if (ret < 0)
1381  return ret;
1382 
1383  if (!src->data[0])
1384  return 0;
1385 
1386  if (c->graph[FIELD_TOP]->noop &&
1387  (!c->graph[FIELD_BOTTOM] || c->graph[FIELD_BOTTOM]->noop) &&
1388  src->buf[0] && !dst->buf[0] && !dst->data[0])
1389  {
1390  /* Lightweight refcopy */
1391  ret = frame_ref(dst, src);
1392  if (ret < 0)
1393  return ret;
1394  } else {
1395  if (!dst->data[0]) {
1396  ret = av_frame_get_buffer(dst, 0);
1397  if (ret < 0)
1398  return ret;
1399  }
1400 
1401  for (int field = 0; field < 2; field++) {
1402  SwsGraph *graph = c->graph[field];
1403  uint8_t *dst_data[4], *src_data[4];
1404  int dst_linesize[4], src_linesize[4];
1405  get_frame_pointers(dst, dst_data, dst_linesize, field);
1406  get_frame_pointers(src, src_data, src_linesize, field);
1407  ff_sws_graph_run(graph, dst_data, dst_linesize,
1408  (const uint8_t **) src_data, src_linesize);
1409  if (!graph->dst.interlaced)
1410  break;
1411  }
1412  }
1413 
1414  return 0;
1415 }
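/*
 * Note on the graph path above: interlaced frames are handled as two per-field
 * scaling graphs (c->graph[FIELD_TOP] / c->graph[FIELD_BOTTOM]), with
 * get_frame_pointers() doubling the linesizes and offsetting the bottom field
 * by one line.
 */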
1416 
1417 static int validate_params(SwsContext *ctx)
1418 {
1419 #define VALIDATE(field, min, max) \
1420  if (ctx->field < min || ctx->field > max) { \
1421  av_log(ctx, AV_LOG_ERROR, "'%s' (%d) out of range [%d, %d]\n", \
1422  #field, (int) ctx->field, min, max); \
1423  return AVERROR(EINVAL); \
1424  }
1425 
1426  VALIDATE(threads, 0, SWS_MAX_THREADS);
1427  VALIDATE(dither, 0, SWS_DITHER_NB - 1)
1428  VALIDATE(alpha_blend, 0, SWS_ALPHA_BLEND_NB - 1)
1429  return 0;
1430 }
1431 
1432 int sws_frame_setup(SwsContext *ctx, const AVFrame *dst, const AVFrame *src)
1433 {
1434  SwsInternal *s = sws_internal(ctx);
1435  const char *err_msg;
1436  int ret;
1437 
1438  if (!src || !dst)
1439  return AVERROR(EINVAL);
1440  if ((ret = validate_params(ctx)) < 0)
1441  return ret;
1442 
1443  for (int field = 0; field < 2; field++) {
1444  SwsFormat src_fmt = ff_fmt_from_frame(src, field);
1445  SwsFormat dst_fmt = ff_fmt_from_frame(dst, field);
1446  int src_ok, dst_ok;
1447 
1448  if ((src->flags ^ dst->flags) & AV_FRAME_FLAG_INTERLACED) {
1449  err_msg = "Cannot convert interlaced to progressive frames or vice versa.\n";
1450  ret = AVERROR(EINVAL);
1451  goto fail;
1452  }
1453 
1454  src_ok = ff_test_fmt(&src_fmt, 0);
1455  dst_ok = ff_test_fmt(&dst_fmt, 1);
1456  if ((!src_ok || !dst_ok) && !ff_props_equal(&src_fmt, &dst_fmt)) {
1457  err_msg = src_ok ? "Unsupported output" : "Unsupported input";
1458  ret = AVERROR(ENOTSUP);
1459  goto fail;
1460  }
1461 
1462  ret = ff_sws_graph_reinit(ctx, &dst_fmt, &src_fmt, field, &s->graph[field]);
1463  if (ret < 0) {
1464  err_msg = "Failed initializing scaling graph";
1465  goto fail;
1466  }
1467 
1468  if (s->graph[field]->incomplete && ctx->flags & SWS_STRICT) {
1469  err_msg = "Incomplete scaling graph";
1470  ret = AVERROR(EINVAL);
1471  goto fail;
1472  }
1473 
1474  if (!src_fmt.interlaced) {
1475  ff_sws_graph_free(&s->graph[FIELD_BOTTOM]);
1476  break;
1477  }
1478 
1479  continue;
1480 
1481  fail:
1482  av_log(ctx, AV_LOG_ERROR, "%s (%s): fmt:%s csp:%s prim:%s trc:%s ->"
1483  " fmt:%s csp:%s prim:%s trc:%s\n",
1484  err_msg, av_err2str(ret),
1485  av_get_pix_fmt_name(src_fmt.format), av_color_space_name(src_fmt.csp),
1486  av_color_primaries_name(src_fmt.color.prim), av_color_transfer_name(src_fmt.color.trc),
1487  av_get_pix_fmt_name(dst_fmt.format), av_color_space_name(dst_fmt.csp),
1488  av_color_primaries_name(dst_fmt.color.prim), av_color_transfer_name(dst_fmt.color.trc));
1489 
1490  for (int i = 0; i < FF_ARRAY_ELEMS(s->graph); i++)
1491  ff_sws_graph_free(&s->graph[i]);
1492 
1493  return ret;
1494  }
1495 
1496  return 0;
1497 }
1498 
1499 /**
1500  * swscale wrapper, so we don't need to export the SwsContext.
1501  * Assumes planar YUV is stored in YUV order rather than YVU.
1502  */
1503 int attribute_align_arg sws_scale(SwsContext *sws,
1504  const uint8_t * const srcSlice[],
1505  const int srcStride[], int srcSliceY,
1506  int srcSliceH, uint8_t *const dst[],
1507  const int dstStride[])
1508 {
1509  SwsInternal *c = sws_internal(sws);
1510  if (c->nb_slice_ctx) {
1511  sws = c->slice_ctx[0];
1512  c = sws_internal(sws);
1513  }
1514 
1515  return scale_internal(sws, srcSlice, srcStride, srcSliceY, srcSliceH,
1516  dst, dstStride, 0, sws->dst_h);
1517 }
1518 
1519 void ff_sws_slice_worker(void *priv, int jobnr, int threadnr,
1520  int nb_jobs, int nb_threads)
1521 {
1522  SwsInternal *parent = priv;
1523  SwsContext *sws = parent->slice_ctx[threadnr];
1524  SwsInternal *c = sws_internal(sws);
1525 
1526  const int slice_height = FFALIGN(FFMAX((parent->dst_slice_height + nb_jobs - 1) / nb_jobs, 1),
1527  c->dst_slice_align);
1528  const int slice_start = jobnr * slice_height;
1529  const int slice_end = FFMIN((jobnr + 1) * slice_height, parent->dst_slice_height);
1530  int err = 0;
1531 
1532  if (slice_end > slice_start) {
1533  uint8_t *dst[4] = { NULL };
1534 
1535  for (int i = 0; i < FF_ARRAY_ELEMS(dst) && parent->frame_dst->data[i]; i++) {
1536  const int vshift = (i == 1 || i == 2) ? c->chrDstVSubSample : 0;
1537  const ptrdiff_t offset = parent->frame_dst->linesize[i] *
1538  (ptrdiff_t)((slice_start + parent->dst_slice_start) >> vshift);
1539 
1540  dst[i] = parent->frame_dst->data[i] + offset;
1541  }
1542 
1543  err = scale_internal(sws, (const uint8_t * const *)parent->frame_src->data,
1544  parent->frame_src->linesize, 0, sws->src_h,
1545  dst, parent->frame_dst->linesize,
1546  parent->dst_slice_start + slice_start, slice_end - slice_start);
1547  }
1548 
1549  parent->slice_err[threadnr] = err;
1550 }
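/*
 * ff_sws_slice_worker() splits parent->dst_slice_height into per-thread bands
 * whose height is a multiple of dst_slice_align; each job then runs a full
 * scale_internal() on its own per-thread context and writes only its band of
 * the destination frame.
 */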
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
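AV_CEIL_RSHIFT is a right shift that rounds up, which is how chroma plane dimensions are derived from the luma size and the subsampling factors. A small illustrative sketch, assuming only the public libavutil pixdesc API (av_pix_fmt_desc_get() and the log2_chroma_w/log2_chroma_h descriptor fields); the helper name is made up for the example:

    #include "libavutil/common.h"
    #include "libavutil/pixdesc.h"

    /* For a 1920x1081 AV_PIX_FMT_YUV420P image this yields 960x541:
     * AV_CEIL_RSHIFT rounds odd dimensions up instead of truncating. */
    static void chroma_dimensions(enum AVPixelFormat fmt, int w, int h,
                                  int *cw, int *ch)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt); /* fmt assumed valid */
        *cw = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);
        *ch = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
    }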
ff_sws_init_swscale_arm
av_cold void ff_sws_init_swscale_arm(SwsInternal *c)
Definition: swscale.c:33
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1415
g
const char * g
Definition: vf_curves.c:128
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
SwsSlice::width
int width
Slice line width.
Definition: swscale_internal.h:1106
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
VALIDATE
#define VALIDATE(field, min, max)
ctx
AVFormatContext * ctx
Definition: movenc.c:49
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
chrRangeToJpeg_c
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:159
ff_hcscale_fast_c
void ff_hcscale_fast_c(SwsInternal *c, int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:38
field
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field.
Definition: writing_filters.txt:78
ff_xyz12Torgb48
void ff_xyz12Torgb48(const SwsInternal *c, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int w, int h)
Definition: swscale.c:740
ff_sws_init_range_convert_riscv
av_cold void ff_sws_init_range_convert_riscv(SwsInternal *c)
Definition: swscale.c:29
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:514
sws_frame_end
void sws_frame_end(SwsContext *sws)
Finish the scaling process for a pair of source/destination frames previously submitted with sws_fram...
Definition: swscale.c:1195
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
ff_sws_init_range_convert_x86
av_cold void ff_sws_init_range_convert_x86(SwsInternal *c)
Definition: swscale.c:474
AV_PIX_FMT_GRAY8A
@ AV_PIX_FMT_GRAY8A
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:143
scale_internal
static int scale_internal(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:1000
fillPlane
static av_always_inline void fillPlane(uint8_t *plane, int stride, int width, int height, int y, uint8_t val)
Definition: swscale.c:54
NULL
#define NULL
Definition: coverity.c:32
format
New swscale design ... SwsGraph is what coordinates multiple passes: these can include cascaded scaling, error diffusion, and so on. Or we could have separate passes for the vertical and horizontal scaling. In between each SwsPass lies a fully allocated image buffer. Graph passes may have different levels of threading, e.g. we can have a single-threaded error diffusion pass following a multi-threaded scaling pass. SwsGraph is internally recreated whenever the image format, dimensions or settings change in any way.
Definition: swscale-v2.txt:14
SwsPlane::available_lines
int available_lines
max number of lines that can be held by this plane
Definition: swscale_internal.h:1092
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
SwsContext::gamma_flag
int gamma_flag
Use gamma correct scaling.
Definition: swscale.h:227
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
V
#define V
Definition: avdct.c:32
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
av_color_primaries_name
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:3790
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
hScale8To15_c
static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:124
ff_sws_init_range_convert
av_cold void ff_sws_init_range_convert(SwsInternal *c)
Definition: swscale.c:623
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
hScale16To19_c
static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:65
c
Undefined Behavior: in the C language some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of undefined operations is unused. The unsafety may seem nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not ...
Definition: undefined.txt:32
SwsInternal::dstY
int dstY
Last destination vertical line output from last slice.
Definition: swscale_internal.h:440
AV_PIX_FMT_BGR4_BYTE
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:92
ff_range_add
int ff_range_add(RangeList *r, unsigned int start, unsigned int len)
Definition: utils.c:2389
attribute_align_arg
#define attribute_align_arg
Definition: internal.h:50
yuv2packedX_fn
void(* yuv2packedX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
Definition: swscale_internal.h:246
AV_CPU_FLAG_SSE2
#define AV_CPU_FLAG_SSE2
PIV SSE2 functions.
Definition: cpu.h:37
ff_sws_graph_free
void ff_sws_graph_free(SwsGraph **pgraph)
Uninitialize any state associated with this filter graph and free it.
Definition: graph.c:726
ff_sws_slice_worker
void ff_sws_slice_worker(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads)
Definition: swscale.c:1519
isBE
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:748
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
ff_sws_init_swscale_loongarch
av_cold void ff_sws_init_swscale_loongarch(SwsInternal *c)
Definition: swscale_init_loongarch.c:62
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
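av_frame_ref()/av_frame_unref() are the reference-counting pair used around frame_src/frame_dst in SwsInternal. A minimal sketch of holding on to an incoming frame without copying pixels; standard libavutil frame API, with the caller assumed to have allocated kept via av_frame_alloc(), and the helper name invented for the example:

    #include "libavutil/frame.h"

    static int keep_frame(AVFrame *kept, const AVFrame *in)
    {
        av_frame_unref(kept);            /* drop whatever reference we held before */
        return av_frame_ref(kept, in);   /* new reference to the same buffers; 0 or AVERROR */
    }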
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
get_frame_pointers
static void get_frame_pointers(const AVFrame *frame, uint8_t *data[4], int linesize[4], int field)
Definition: swscale.c:1311
fillPlane16
static void fillPlane16(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian)
Definition: swscale_internal.h:1052
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
usePal
static av_always_inline int usePal(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:918
BV
#define BV
cpu.h
isAnyRGB
static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:856
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
SwsContext::src_h
int src_h
Width and height of the source frame.
Definition: swscale.h:235
SwsFormat
Definition: format.h:77
RGB2YUV_SHIFT
#define RGB2YUV_SHIFT
align
static const uint8_t *BS_FUNC() align(BSCTX *bc)
Skip bits to a byte boundary.
Definition: bitstream_template.h:419
is32BPS
static av_always_inline int is32BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:734
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:408
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
a
The reader does not expect b to be semantically negative here, and if the code is changed, by maybe adding a ..., a division or other, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a ...
Definition: undefined.txt:41
offset
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it ... vf offset ...
Definition: writing_filters.txt:86
lumRangeFromJpeg_c
static void lumRangeFromJpeg_c(int16_t *dst, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:197
SWS_DITHER_NB
@ SWS_DITHER_NB
Definition: swscale.h:86
ff_sws_init_swscale_ppc
av_cold void ff_sws_init_swscale_ppc(SwsInternal *c)
Definition: swscale_altivec.c:233
SwsContext::dst_format
int dst_format
Destination pixel format.
Definition: swscale.h:238
fillPlane32
static void fillPlane32(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian, int is_float)
Definition: swscale_internal.h:1066
GY
#define GY
Y
#define Y
Definition: boxblur.h:37
yuv2anyX_fn
void(* yuv2anyX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
Definition: swscale_internal.h:280
ff_sws_init_swscale_x86
av_cold void ff_sws_init_swscale_x86(SwsInternal *c)
Definition: swscale.c:490
scale_cascaded
static int scale_cascaded(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:981
emms.h
SwsInternal::dst_slice_align
unsigned int dst_slice_align
Definition: swscale_internal.h:687
sws
static SwsContext * sws[3]
Definition: swscale.c:73
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
SwsGraph::dst
SwsFormat dst
Definition: graph.h:128
SwsFormat::format
enum AVPixelFormat format
Definition: format.h:80
sws_send_slice
int sws_send_slice(SwsContext *sws, unsigned int slice_start, unsigned int slice_height)
Indicate that a horizontal slice of input data is available in the source frame previously provided t...
Definition: swscale.c:1234
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
FIELD_TOP
@ FIELD_TOP
Definition: format.h:56
ff_sws_init_scale
void ff_sws_init_scale(SwsInternal *c)
Definition: swscale.c:692
src2
const pixel * src2
Definition: h264pred_template.c:421
common.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
check_image_pointers
static int check_image_pointers(const uint8_t *const data[4], enum AVPixelFormat pix_fmt, const int linesizes[4])
Definition: swscale.c:723
av_always_inline
#define av_always_inline
Definition: attributes.h:63
swscale_internal.h
yuv2interleavedX_fn
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
Definition: swscale_internal.h:144
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_FLAG_BE
#define AV_PIX_FMT_FLAG_BE
Pixel format is big-endian.
Definition: pixdesc.h:116
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
SwsContext::dst_h
int dst_h
Width and height of the destination frame.
Definition: swscale.h:236
ff_updateMMXDitherTables
void ff_updateMMXDitherTables(SwsInternal *c, int dstY)
AV_PIX_FMT_RGB4_BYTE
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:95
SwsSlice
Struct which defines a slice of an image to be scaled or an output for a scaled slice.
Definition: swscale_internal.h:1104
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: dec.c:845
ff_init_slice_from_src
int ff_init_slice_from_src(SwsSlice *s, uint8_t *const src[4], const int stride[4], int srcW, int lumY, int lumH, int chrY, int chrH, int relative)
Definition: slice.c:148
frame_ref
static int frame_ref(AVFrame *dst, const AVFrame *src)
Definition: swscale.c:1340
SWS_DITHER_ED
@ SWS_DITHER_ED
Definition: swscale.h:83
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
yuv2packed1_fn
void(* yuv2packed1_fn)(SwsInternal *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
Definition: swscale_internal.h:181
SwsInternal
Definition: swscale_internal.h:317
ret
ret
Definition: filter_design.txt:187
sws_receive_slice_alignment
unsigned int sws_receive_slice_alignment(const SwsContext *sws)
Get the alignment required for slices.
Definition: swscale.c:1247
__asm__
__asm__(".macro parse_r var r\n\t" "\\var = -1\n\t" _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3) _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7) _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11) _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15) _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19) _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31) ".iflt \\var\n\t" ".error \"Unable to parse register name \\r\"\n\t" ".endif\n\t" ".endm")
bswap.h
sws_frame_start
int sws_frame_start(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Initialize the scaling process for a given pair of source/destination frames.
Definition: swscale.c:1203
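sws_frame_start(), sws_send_slice(), sws_receive_slice(), sws_receive_slice_alignment() and sws_frame_end() from this index together form the incremental slice API. A hedged sketch of the calling sequence when simply submitting and receiving the whole frame at once (error handling trimmed; sws, src and dst are assumed to be a configured context and two properly allocated AVFrames):

    int ret = sws_frame_start(sws, dst, src);
    if (ret < 0)
        return ret;

    /* make all input lines available, then request all output lines */
    ret = sws_send_slice(sws, 0, src->height);
    if (ret >= 0) {
        /* for partial slices, slice_height must honour this alignment
         * (except for the last slice); taking everything is always allowed */
        unsigned int align = sws_receive_slice_alignment(sws);
        (void)align;
        ret = sws_receive_slice(sws, 0, dst->height);
    }

    sws_frame_end(sws);
    return ret;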
frame
these buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the ..., this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return ... or at least make progress towards producing a frame.
Definition: filter_design.txt:265
sws_pb_64
static const uint8_t sws_pb_64[8]
Definition: swscale.c:50
U
#define U(x)
Definition: vpx_arith.h:37
yuv2planarX_fn
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
Definition: swscale_internal.h:124
reset_ptr
static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
Definition: swscale.c:711
ff_init_vscale_pfn
void ff_init_vscale_pfn(SwsInternal *c, yuv2planar1_fn yuv2plane1, yuv2planarX_fn yuv2planeX, yuv2interleavedX_fn yuv2nv12cX, yuv2packed1_fn yuv2packed1, yuv2packed2_fn yuv2packed2, yuv2packedX_fn yuv2packedX, yuv2anyX_fn yuv2anyX, int use_mmx)
setup vertical scaler functions
Definition: vscale.c:258
sws_scale
int attribute_align_arg sws_scale(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1503
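For the legacy pointer/stride interface whose signature is shown above, a minimal whole-frame call might look as follows; src and dst are assumed to be allocated AVFrames matching the formats and sizes the context was created with, and the wrapper function is invented for the example:

    #include "libavutil/frame.h"
    #include "libswscale/swscale.h"

    static int convert_with_sws_scale(SwsContext *sws, AVFrame *dst, const AVFrame *src)
    {
        /* scale all src->height input lines starting at line 0; returns the
         * number of lines written to dst, or a negative error code */
        return sws_scale(sws, (const uint8_t *const *)src->data, src->linesize,
                         0, src->height, dst->data, dst->linesize);
    }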
SWS_PRINT_INFO
@ SWS_PRINT_INFO
Emit verbose log of scaling parameters.
Definition: swscale.h:119
SwsFormat::color
SwsColor color
Definition: format.h:85
lumRangeFromJpeg16_c
static void lumRangeFromJpeg16_c(int16_t *_dst, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:244
atomic_exchange_explicit
#define atomic_exchange_explicit(object, desired, order)
Definition: stdatomic.h:106
SWS_STRICT
@ SWS_STRICT
Return an error on underspecified conversions.
Definition: swscale.h:114
ff_dither_8x8_128
const uint8_t ff_dither_8x8_128[9][8]
Definition: swscale.c:38
AV_CPU_FLAG_MMXEXT
#define AV_CPU_FLAG_MMXEXT
SSE integer functions or AMD MMX ext.
Definition: cpu.h:33
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
ff_swscale
int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:259
SwsFormat::csp
enum AVColorSpace csp
Definition: format.h:82
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
RU
#define RU
av_clip_uint16
#define av_clip_uint16
Definition: common.h:112
FIELD_BOTTOM
@ FIELD_BOTTOM
Definition: format.h:57
BU
#define BU
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
desc
const char * desc
Definition: libsvtav1.c:78
scale_gamma
static int scale_gamma(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:947
mem.h
SwsPlane::sliceY
int sliceY
index of first line
Definition: swscale_internal.h:1093
SwsContext::dst_w
int dst_w
Definition: swscale.h:236
SwsInternal::dst_slice_height
int dst_slice_height
Definition: swscale_internal.h:334
SwsGraph
Filter graph, which represents a 'baked' pixel format conversion.
Definition: graph.h:108
SwsContext::src_format
int src_format
Source pixel format.
Definition: swscale.h:237
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
ff_hyscale_fast_c
void ff_hyscale_fast_c(SwsInternal *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:23
BY
#define BY
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
ff_sws_init_output_funcs
av_cold void ff_sws_init_output_funcs(SwsInternal *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
Definition: output.c:3289
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
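av_fast_malloc() implements the grow-only scratch buffer pattern used throughout swscale: the buffer is reallocated only when the requested size exceeds the current one. A small usage sketch (real libavutil API; the surrounding function is hypothetical):

    #include <stddef.h>
    #include <stdint.h>
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    static int run_with_scratch(size_t needed)
    {
        uint8_t     *tmp      = NULL;
        unsigned int tmp_size = 0;

        /* grows tmp to at least 'needed' bytes; on failure the old buffer
         * is freed and tmp is set to NULL */
        av_fast_malloc(&tmp, &tmp_size, needed);
        if (!tmp)
            return AVERROR(ENOMEM);

        /* ... use tmp as scratch space ... */

        av_freep(&tmp);
        return 0;
    }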
ff_sws_init_swscale_aarch64
av_cold void ff_sws_init_swscale_aarch64(SwsInternal *c)
Definition: swscale.c:310
SwsInternal::dst_slice_start
int dst_slice_start
Definition: swscale_internal.h:333
int32_t
int32_t
Definition: audioconvert.c:56
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sws_internal
static SwsInternal * sws_internal(const SwsContext *sws)
Definition: swscale_internal.h:74
sws_scale_frame
int sws_scale_frame(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Scale source data from src and write the output to dst.
Definition: swscale.c:1356
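A minimal end-to-end sketch for sws_scale_frame(), assuming a context from the classic sws_getContext() helper and two properly allocated AVFrames whose dimensions and formats match it; the wrapper function is made up for the example:

    #include "libavutil/frame.h"
    #include "libswscale/swscale.h"

    static int convert_frame(AVFrame *dst, const AVFrame *src)
    {
        SwsContext *sws = sws_getContext(src->width, src->height, src->format,
                                         dst->width, dst->height, dst->format,
                                         SWS_BILINEAR, NULL, NULL, NULL);
        int ret;

        if (!sws)
            return AVERROR(EINVAL);

        /* non-negative on success, a negative AVERROR code on failure */
        ret = sws_scale_frame(sws, dst, src);

        sws_freeContext(sws);
        return ret;
    }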
h
h
Definition: vp9dsp_template.c:2070
stride
#define stride
Definition: h264pred_template.c:536
solve_range_convert
static void solve_range_convert(uint16_t src_min, uint16_t src_max, uint16_t dst_min, uint16_t dst_max, int src_bits, int src_shift, int mult_shift, uint32_t *coeff, int64_t *offset)
Definition: swscale.c:574
isPlanar
static av_always_inline int isPlanar(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:896
width
#define width
Definition: dsp.h:89
SwsContext
Main external API structure.
Definition: swscale.h:189
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:120
ff_sws_graph_run
New swscale design ... SwsGraph is what coordinates multiple passes: these can include cascaded scaling, error diffusion, and so on. Or we could have separate passes for the vertical and horizontal scaling. In between each SwsPass lies a fully allocated image buffer. Graph passes may have different levels of threading, e.g. we can have a single-threaded error diffusion pass following a multi-threaded scaling pass. SwsGraph is internally recreated whenever the image format, dimensions or settings change in any way. Interlaced images are split into separate fields and ff_sws_graph_run() is called on each. From the point of view of SwsGraph itself ...
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3823
src
#define src
Definition: vp8dsp.c:248
swscale.h
SwsColor::prim
enum AVColorPrimaries prim
Definition: format.h:61
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
AV_RB16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_RB16
Definition: bytestream.h:98
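AV_RB16/AV_RL16 and the AV_WB16/AV_WL16 counterparts listed above read and write 16-bit values with an explicit byte order, regardless of the host CPU's endianness. A tiny illustrative sketch:

    #include <stdint.h>
    #include "libavutil/intreadwrite.h"

    static void endian_demo(void)
    {
        uint8_t buf[2];

        AV_WB16(buf, 0x1234);          /* stores the bytes 0x12 0x34 (big-endian order) */
        unsigned be = AV_RB16(buf);    /* 0x1234 on any host */
        unsigned le = AV_RL16(buf);    /* 0x3412: same bytes interpreted as little-endian */
        (void)be; (void)le;
    }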
isALPHA
static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:878
dither
static const uint8_t dither[8][8]
Definition: vf_fspp.c:62