/* FFmpeg — libswscale/swscale.c (source listing) */
1 /*
2  * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <stdio.h>
23 #include <string.h>
24 
25 #include "libavutil/avassert.h"
26 #include "libavutil/bswap.h"
27 #include "libavutil/common.h"
28 #include "libavutil/cpu.h"
29 #include "libavutil/emms.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/mem_internal.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/hwcontext.h"
35 #include "config.h"
36 #include "swscale_internal.h"
37 #include "swscale.h"
38 #if CONFIG_VULKAN
39 #include "vulkan/ops.h"
40 #endif
41 
/* 8x8 dither matrix used when reducing bit depth; rows are selected with
 * (y & 7) (see ff_swscale: lumDither8/chrDither8).  A ninth row duplicating
 * the first is appended so code may safely read one row past the index. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_128)[9][8] = {
    {  36, 68, 60, 92, 34, 66, 58, 90, },
    { 100,  4, 124, 28, 98,  2, 122, 26, },
    {  52, 84, 44, 76, 50, 82, 42, 74, },
    { 116, 20, 108, 12, 114, 18, 106, 10, },
    {  32, 64, 56, 88, 38, 70, 62, 94, },
    {  96,  0, 120, 24, 102,  6, 126, 30, },
    {  48, 80, 40, 72, 54, 86, 46, 78, },
    { 112, 16, 104,  8, 118, 22, 110, 14, },
    {  36, 68, 60, 92, 34, 66, 58, 90, },
};
53 
/* Constant "neutral" dither row (all 64) installed when dithering is not
 * needed (see ff_swscale: !should_dither path). */
DECLARE_ALIGNED(8, static const uint8_t, sws_pb_64)[8] = {
    64, 64, 64, 64, 64, 64, 64, 64
};
57 
58 static av_always_inline void fillPlane(uint8_t *plane, int stride, int width,
59  int height, int y, uint8_t val)
60 {
61  int i;
62  uint8_t *ptr = plane + stride * y;
63  for (i = 0; i < height; i++) {
64  memset(ptr, val, width);
65  ptr += stride;
66  }
67 }
68 
/* Horizontal scaling of high-bit-depth input to the 19-bit intermediate
 * format.  _dst actually holds int32 samples and _src uint16 samples; the
 * shift `sh` is chosen from the source depth so the 14-bit filter product
 * lands on 19 significant bits. */
static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
    int i;
    int32_t *dst = (int32_t *) _dst;
    const uint16_t *src = (const uint16_t *) _src;
    int bits = desc->comp[0].depth - 1;
    int sh = bits - 4;

    /* NOTE(review): RGB/PAL sources below 16 bpc use a fixed shift of 9 —
     * presumably matching how the input routines scale those formats;
     * confirm against the input code before changing. */
    if ((isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8) && desc->comp[0].depth<16) {
        sh = 9;
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input are process like uint 16bpc */
        sh = 16 - 1 - 4;
    }

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        // filter=14 bit, input=16 bit, output=30 bit, >> sh makes 19 bit
        dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
    }
}
98 
/* Horizontal scaling of high-bit-depth input to the 15-bit intermediate
 * format.  _src actually holds uint16 samples; `sh` removes the filter's
 * fractional bits so the result has 15 significant bits. */
static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
    int i;
    const uint16_t *src = (const uint16_t *) _src;
    int sh = desc->comp[0].depth - 1;

    if (sh<15) {
        /* NOTE(review): RGB/PAL sources get a fixed shift of 13 here —
         * presumably matching the input routines' scaling; confirm. */
        sh = isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8 ? 13 : (desc->comp[0].depth - 1);
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input are process like uint 16bpc */
        sh = 16 - 1;
    }

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        // filter=14 bit, input=16 bit, output=30 bit, >> sh makes 15 bit
        dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
    }
}
126 
127 // bilinear / bicubic scaling
128 static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW,
129  const uint8_t *src, const int16_t *filter,
130  const int32_t *filterPos, int filterSize)
131 {
132  int i;
133  for (i = 0; i < dstW; i++) {
134  int j;
135  int srcPos = filterPos[i];
136  int val = 0;
137  for (j = 0; j < filterSize; j++) {
138  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
139  }
140  dst[i] = FFMIN(val >> 7, (1 << 15) - 1); // the cubic equation does overflow ...
141  }
142 }
143 
144 static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW,
145  const uint8_t *src, const int16_t *filter,
146  const int32_t *filterPos, int filterSize)
147 {
148  int i;
149  int32_t *dst = (int32_t *) _dst;
150  for (i = 0; i < dstW; i++) {
151  int j;
152  int srcPos = filterPos[i];
153  int val = 0;
154  for (j = 0; j < filterSize; j++) {
155  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
156  }
157  dst[i] = FFMIN(val >> 3, (1 << 19) - 1); // the cubic equation does overflow ...
158  }
159 }
160 
161 // FIXME all pal and rgb srcFormats could do this conversion as well
162 // FIXME all scalers more complex than bilinear could do half of this transform
// FIXME all pal and rgb srcFormats could do this conversion as well
// FIXME all scalers more complex than bilinear could do half of this transform
/* Expand limited-range chroma to full (JPEG) range in the 15-bit pipeline:
 * fixed-point multiply-add with 14 fractional bits, clipped to the 15-bit
 * maximum since the expansion can overshoot. */
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width,
                             uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;

    for (int i = 0; i < width; i++) {
        int u = (dstU[i] * coeff + offset) >> 14;
        int v = (dstV[i] * coeff + offset) >> 14;

        dstU[i] = FFMIN(u, (1 << 15) - 1);
        dstV[i] = FFMIN(v, (1 << 15) - 1);
    }
}
176 
/* Compress full-range (JPEG) chroma to limited range in the 15-bit pipeline:
 * fixed-point multiply-add with 14 fractional bits.  No clipping is needed,
 * as the target range is strictly narrower than the source. */
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width,
                               uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;

    for (int i = 0; i < width; i++) {
        const int u = dstU[i] * coeff + offset;
        const int v = dstV[i] * coeff + offset;

        dstU[i] = u >> 14;
        dstV[i] = v >> 14;
    }
}
188 
/* Expand limited-range luma to full (JPEG) range in the 15-bit pipeline
 * (14-bit fixed point), clipping the possible overshoot to 15 bits. */
static void lumRangeToJpeg_c(int16_t *dst, int width,
                             uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;

    for (int i = 0; i < width; i++) {
        const int y = (dst[i] * coeff + offset) >> 14;

        dst[i] = FFMIN(y, (1 << 15) - 1);
    }
}
200 
/* Compress full-range (JPEG) luma to limited range in the 15-bit pipeline
 * (14-bit fixed point); no clipping required. */
static void lumRangeFromJpeg_c(int16_t *dst, int width,
                               uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;
    int16_t *p = dst;

    for (int n = width; n > 0; n--, p++)
        *p = (*p * coeff + offset) >> 14;
}
210 
/* 16-bit-depth variant of chrRangeToJpeg_c: the buffers actually hold int32
 * samples, the fixed point uses 18 fractional bits, and 64-bit products
 * avoid overflow; results are clipped to the 19-bit maximum. */
static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
                               uint32_t coeff, int64_t offset)
{
    int32_t *const dstU = (int32_t *) _dstU;
    int32_t *const dstV = (int32_t *) _dstV;

    for (int i = 0; i < width; i++) {
        const int u = ((int64_t) dstU[i] * coeff + offset) >> 18;
        const int v = ((int64_t) dstV[i] * coeff + offset) >> 18;

        dstU[i] = FFMIN(u, (1 << 19) - 1);
        dstV[i] = FFMIN(v, (1 << 19) - 1);
    }
}
224 
/* 16-bit-depth variant of chrRangeFromJpeg_c: int32 samples, 18 fractional
 * bits, 64-bit products to avoid overflow; no clipping required. */
static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
                                 uint32_t coeff, int64_t offset)
{
    int32_t *const dstU = (int32_t *) _dstU;
    int32_t *const dstV = (int32_t *) _dstV;

    for (int i = 0; i < width; i++) {
        const int64_t u = (int64_t) dstU[i] * coeff + offset;
        const int64_t v = (int64_t) dstV[i] * coeff + offset;

        dstU[i] = u >> 18;
        dstV[i] = v >> 18;
    }
}
236 
/* 16-bit-depth variant of lumRangeToJpeg_c: int32 samples, 18 fractional
 * bits, clipped to the 19-bit maximum. */
static void lumRangeToJpeg16_c(int16_t *_dst, int width,
                               uint32_t coeff, int64_t offset)
{
    int32_t *const dst = (int32_t *) _dst;

    for (int i = 0; i < width; i++) {
        const int y = ((int64_t) dst[i] * coeff + offset) >> 18;

        dst[i] = FFMIN(y, (1 << 19) - 1);
    }
}
247 
/* 16-bit-depth variant of lumRangeFromJpeg_c: int32 samples, 18 fractional
 * bits; no clipping required. */
static void lumRangeFromJpeg16_c(int16_t *_dst, int width,
                                 uint32_t coeff, int64_t offset)
{
    int32_t *const dst = (int32_t *) _dst;

    for (int i = 0; i < width; i++) {
        const int64_t y = (int64_t) dst[i] * coeff + offset;

        dst[i] = y >> 18;
    }
}
256 
257 
#define DEBUG_SWSCALE_BUFFERS 0
/* Compile-time-disabled debug logging; expects a variable `c` (the
 * SwsInternal context) in the caller's scope.  Wrapped in do/while(0) so
 * the macro behaves as a single statement and cannot capture a following
 * `else` (the original bare `if` form had a dangling-else hazard). */
#define DEBUG_BUFFERS(...)                       \
    do {                                         \
        if (DEBUG_SWSCALE_BUFFERS)               \
            av_log(c, AV_LOG_DEBUG, __VA_ARGS__);\
    } while (0)
262 
263 int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[],
264  int srcSliceY, int srcSliceH, uint8_t *const dst[],
265  const int dstStride[], int dstSliceY, int dstSliceH)
266 {
267  const int scale_dst = dstSliceY > 0 || dstSliceH < c->opts.dst_h;
268 
269  /* load a few things into local vars to make the code more readable?
270  * and faster */
271  const int dstW = c->opts.dst_w;
272  int dstH = c->opts.dst_h;
273 
274  const enum AVPixelFormat dstFormat = c->opts.dst_format;
275  const int flags = c->opts.flags;
276  int32_t *vLumFilterPos = c->vLumFilterPos;
277  int32_t *vChrFilterPos = c->vChrFilterPos;
278 
279  const int vLumFilterSize = c->vLumFilterSize;
280  const int vChrFilterSize = c->vChrFilterSize;
281 
282  yuv2planar1_fn yuv2plane1 = c->yuv2plane1;
283  yuv2planarX_fn yuv2planeX = c->yuv2planeX;
284  yuv2interleavedX_fn yuv2nv12cX = c->yuv2nv12cX;
285  yuv2packed1_fn yuv2packed1 = c->yuv2packed1;
286  yuv2packed2_fn yuv2packed2 = c->yuv2packed2;
287  yuv2packedX_fn yuv2packedX = c->yuv2packedX;
288  yuv2anyX_fn yuv2anyX = c->yuv2anyX;
289  const int chrSrcSliceY = srcSliceY >> c->chrSrcVSubSample;
290  const int chrSrcSliceH = AV_CEIL_RSHIFT(srcSliceH, c->chrSrcVSubSample);
291  int should_dither = isNBPS(c->opts.src_format) ||
292  is16BPS(c->opts.src_format);
293  int lastDstY;
294 
295  /* vars which will change and which we need to store back in the context */
296  int dstY = c->dstY;
297  int lastInLumBuf = c->lastInLumBuf;
298  int lastInChrBuf = c->lastInChrBuf;
299 
300  int lumStart = 0;
301  int lumEnd = c->descIndex[0];
302  int chrStart = lumEnd;
303  int chrEnd = c->descIndex[1];
304  int vStart = chrEnd;
305  int vEnd = c->numDesc;
306  SwsSlice *src_slice = &c->slice[lumStart];
307  SwsSlice *hout_slice = &c->slice[c->numSlice-2];
308  SwsSlice *vout_slice = &c->slice[c->numSlice-1];
309  SwsFilterDescriptor *desc = c->desc;
310 
311  int needAlpha = c->needAlpha;
312 
313  int hasLumHoles = 1;
314  int hasChrHoles = 1;
315 
316  const uint8_t *src2[4];
317  int srcStride2[4];
318 
319  if (isPacked(c->opts.src_format)) {
320  src2[0] =
321  src2[1] =
322  src2[2] =
323  src2[3] = src[0];
324  srcStride2[0] =
325  srcStride2[1] =
326  srcStride2[2] =
327  srcStride2[3] = srcStride[0];
328  } else {
329  memcpy(src2, src, sizeof(src2));
330  memcpy(srcStride2, srcStride, sizeof(srcStride2));
331  }
332 
333  srcStride2[1] *= 1 << c->vChrDrop;
334  srcStride2[2] *= 1 << c->vChrDrop;
335 
336  DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
337  src2[0], srcStride2[0], src2[1], srcStride2[1],
338  src2[2], srcStride2[2], src2[3], srcStride2[3],
339  dst[0], dstStride[0], dst[1], dstStride[1],
340  dst[2], dstStride[2], dst[3], dstStride[3]);
341  DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
342  srcSliceY, srcSliceH, dstY, dstH);
343  DEBUG_BUFFERS("vLumFilterSize: %d vChrFilterSize: %d\n",
344  vLumFilterSize, vChrFilterSize);
345 
346  if (dstStride[0]&15 || dstStride[1]&15 ||
347  dstStride[2]&15 || dstStride[3]&15) {
348  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
349  if (flags & SWS_PRINT_INFO &&
350  !atomic_exchange_explicit(&ctx->stride_unaligned_warned, 1, memory_order_relaxed)) {
352  "Warning: dstStride is not aligned!\n"
353  " ->cannot do aligned memory accesses anymore\n");
354  }
355  }
356 
357 #if ARCH_X86
358  if ( (uintptr_t) dst[0]&15 || (uintptr_t) dst[1]&15 || (uintptr_t) dst[2]&15
359  || (uintptr_t)src2[0]&15 || (uintptr_t)src2[1]&15 || (uintptr_t)src2[2]&15
360  || dstStride[0]&15 || dstStride[1]&15 || dstStride[2]&15 || dstStride[3]&15
361  || srcStride2[0]&15 || srcStride2[1]&15 || srcStride2[2]&15 || srcStride2[3]&15
362  ) {
363  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
364  int cpu_flags = av_get_cpu_flags();
365  if (flags & SWS_PRINT_INFO && HAVE_MMXEXT && (cpu_flags & AV_CPU_FLAG_SSE2) &&
366  !atomic_exchange_explicit(&ctx->stride_unaligned_warned,1, memory_order_relaxed)) {
367  av_log(c, AV_LOG_WARNING, "Warning: data is not aligned! This can lead to a speed loss\n");
368  }
369  }
370 #endif
371 
372  if (scale_dst) {
373  dstY = dstSliceY;
374  dstH = dstY + dstSliceH;
375  lastInLumBuf = -1;
376  lastInChrBuf = -1;
377  } else if (srcSliceY == 0) {
378  /* Note the user might start scaling the picture in the middle so this
379  * will not get executed. This is not really intended but works
380  * currently, so people might do it. */
381  dstY = 0;
382  lastInLumBuf = -1;
383  lastInChrBuf = -1;
384  }
385 
386  if (!should_dither) {
387  c->chrDither8 = c->lumDither8 = sws_pb_64;
388  }
389  lastDstY = dstY;
390 
391  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
392  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, c->use_mmx_vfilter);
393 
394  ff_init_slice_from_src(src_slice, (uint8_t**)src2, srcStride2, c->opts.src_w,
395  srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);
396 
397  ff_init_slice_from_src(vout_slice, (uint8_t**)dst, dstStride, c->opts.dst_w,
398  dstY, dstSliceH, dstY >> c->chrDstVSubSample,
399  AV_CEIL_RSHIFT(dstSliceH, c->chrDstVSubSample), scale_dst);
400  if (srcSliceY == 0) {
401  hout_slice->plane[0].sliceY = lastInLumBuf + 1;
402  hout_slice->plane[1].sliceY = lastInChrBuf + 1;
403  hout_slice->plane[2].sliceY = lastInChrBuf + 1;
404  hout_slice->plane[3].sliceY = lastInLumBuf + 1;
405 
406  hout_slice->plane[0].sliceH =
407  hout_slice->plane[1].sliceH =
408  hout_slice->plane[2].sliceH =
409  hout_slice->plane[3].sliceH = 0;
410  hout_slice->width = dstW;
411  }
412 
413  for (; dstY < dstH; dstY++) {
414  const int chrDstY = dstY >> c->chrDstVSubSample;
415  int use_mmx_vfilter= c->use_mmx_vfilter;
416 
417  // First line needed as input
418  const int firstLumSrcY = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
419  const int firstLumSrcY2 = FFMAX(1 - vLumFilterSize, vLumFilterPos[FFMIN(dstY | ((1 << c->chrDstVSubSample) - 1), c->opts.dst_h - 1)]);
420  // First line needed as input
421  const int firstChrSrcY = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);
422 
423  // Last line needed as input
424  int lastLumSrcY = FFMIN(c->opts.src_h, firstLumSrcY + vLumFilterSize) - 1;
425  int lastLumSrcY2 = FFMIN(c->opts.src_h, firstLumSrcY2 + vLumFilterSize) - 1;
426  int lastChrSrcY = FFMIN(c->chrSrcH, firstChrSrcY + vChrFilterSize) - 1;
427  int enough_lines;
428 
429  int i;
430  int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;
431 
432  // handle holes (FAST_BILINEAR & weird filters)
433  if (firstLumSrcY > lastInLumBuf) {
434 
435  hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
436  if (hasLumHoles) {
437  hout_slice->plane[0].sliceY = firstLumSrcY;
438  hout_slice->plane[3].sliceY = firstLumSrcY;
439  hout_slice->plane[0].sliceH =
440  hout_slice->plane[3].sliceH = 0;
441  }
442 
443  lastInLumBuf = firstLumSrcY - 1;
444  }
445  if (firstChrSrcY > lastInChrBuf) {
446 
447  hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
448  if (hasChrHoles) {
449  hout_slice->plane[1].sliceY = firstChrSrcY;
450  hout_slice->plane[2].sliceY = firstChrSrcY;
451  hout_slice->plane[1].sliceH =
452  hout_slice->plane[2].sliceH = 0;
453  }
454 
455  lastInChrBuf = firstChrSrcY - 1;
456  }
457 
458  DEBUG_BUFFERS("dstY: %d\n", dstY);
459  DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
460  firstLumSrcY, lastLumSrcY, lastInLumBuf);
461  DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
462  firstChrSrcY, lastChrSrcY, lastInChrBuf);
463 
464  // Do we have enough lines in this slice to output the dstY line
465  enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
466  lastChrSrcY < AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample);
467 
468  if (!enough_lines) {
469  lastLumSrcY = srcSliceY + srcSliceH - 1;
470  lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
471  DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
472  lastLumSrcY, lastChrSrcY);
473  }
474 
475  av_assert0((lastLumSrcY - firstLumSrcY + 1) <= hout_slice->plane[0].available_lines);
476  av_assert0((lastChrSrcY - firstChrSrcY + 1) <= hout_slice->plane[1].available_lines);
477 
478 
479  posY = hout_slice->plane[0].sliceY + hout_slice->plane[0].sliceH;
480  if (posY <= lastLumSrcY && !hasLumHoles) {
481  firstPosY = FFMAX(firstLumSrcY, posY);
482  lastPosY = FFMIN(firstLumSrcY + hout_slice->plane[0].available_lines - 1, srcSliceY + srcSliceH - 1);
483  } else {
484  firstPosY = posY;
485  lastPosY = lastLumSrcY;
486  }
487 
488  cPosY = hout_slice->plane[1].sliceY + hout_slice->plane[1].sliceH;
489  if (cPosY <= lastChrSrcY && !hasChrHoles) {
490  firstCPosY = FFMAX(firstChrSrcY, cPosY);
491  lastCPosY = FFMIN(firstChrSrcY + hout_slice->plane[1].available_lines - 1, AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample) - 1);
492  } else {
493  firstCPosY = cPosY;
494  lastCPosY = lastChrSrcY;
495  }
496 
497  ff_rotate_slice(hout_slice, lastPosY, lastCPosY);
498 
499  if (posY < lastLumSrcY + 1) {
500  for (i = lumStart; i < lumEnd; ++i)
501  desc[i].process(c, &desc[i], firstPosY, lastPosY - firstPosY + 1);
502  }
503 
504  lastInLumBuf = lastLumSrcY;
505 
506  if (cPosY < lastChrSrcY + 1) {
507  for (i = chrStart; i < chrEnd; ++i)
508  desc[i].process(c, &desc[i], firstCPosY, lastCPosY - firstCPosY + 1);
509  }
510 
511  lastInChrBuf = lastChrSrcY;
512 
513  if (!enough_lines)
514  break; // we can't output a dstY line so let's try with the next slice
515 
516 #if HAVE_MMX_INLINE
518  c->dstW_mmx = c->opts.dst_w;
519 #endif
520  if (should_dither) {
521  c->chrDither8 = ff_dither_8x8_128[chrDstY & 7];
522  c->lumDither8 = ff_dither_8x8_128[dstY & 7];
523  }
524  if (dstY >= c->opts.dst_h - 2) {
525  /* hmm looks like we can't use MMX here without overwriting
526  * this array's tail */
527  ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX,
528  &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX);
529  use_mmx_vfilter= 0;
530  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
531  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, use_mmx_vfilter);
532  }
533 
534  for (i = vStart; i < vEnd; ++i)
535  desc[i].process(c, &desc[i], dstY, 1);
536  }
537  if (isPlanar(dstFormat) && isALPHA(dstFormat) && !needAlpha) {
538  int offset = lastDstY - dstSliceY;
539  int length = dstW;
540  int height = dstY - lastDstY;
541 
542  if (is16BPS(dstFormat) || isNBPS(dstFormat)) {
543  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
544  fillPlane16(dst[3], dstStride[3], length, height, offset,
545  1, desc->comp[3].depth,
546  isBE(dstFormat));
547  } else if (is32BPS(dstFormat)) {
548  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
549  fillPlane32(dst[3], dstStride[3], length, height, offset,
550  1, desc->comp[3].depth,
551  isBE(dstFormat), desc->flags & AV_PIX_FMT_FLAG_FLOAT);
552  } else
553  fillPlane(dst[3], dstStride[3], length, height, offset, 255);
554  }
555 
556 #if HAVE_MMXEXT_INLINE
558  __asm__ volatile ("sfence" ::: "memory");
559 #endif
560  emms_c();
561 
562  /* store changed local vars back in the context */
563  c->dstY = dstY;
564  c->lastInLumBuf = lastInLumBuf;
565  c->lastInChrBuf = lastInChrBuf;
566 
567  return dstY - lastDstY;
568 }
569 
570 /*
571  * Solve for coeff and offset:
572  * dst = ((src << src_shift) * coeff + offset) >> (mult_shift + src_shift)
573  *
574  * If SwsInternal->dstBpc is > 14, coeff is uint16_t and offset is int32_t,
575  * otherwise (SwsInternal->dstBpc is <= 14) coeff is uint32_t and offset is
576  * int64_t.
577  */
static void solve_range_convert(uint16_t src_min, uint16_t src_max,
                                uint16_t dst_min, uint16_t dst_max,
                                int src_bits, int src_shift, int mult_shift,
                                uint32_t *coeff, int64_t *offset)
{
    /* NOTE(review): src_bits is currently unused in this function; it is
     * kept for interface symmetry with the callers. */
    uint16_t src_range = src_max - src_min;
    uint16_t dst_range = dst_max - dst_min;
    int total_shift = mult_shift + src_shift;
    /* ceil so the full source range maps onto the full destination range */
    *coeff = AV_CEIL_RSHIFT(((uint64_t) dst_range << total_shift) / src_range, src_shift);
    /* offset pins src_max onto dst_max; the final term is the rounding bias */
    *offset = ((int64_t) dst_max << total_shift) -
              ((int64_t) src_max << src_shift) * *coeff +
              (1U << (mult_shift - 1));
}
591 
593 {
594  const int bit_depth = c->dstBpc ? FFMIN(c->dstBpc, 16) : 8;
595  const int src_bits = bit_depth <= 14 ? 15 : 19;
596  const int src_shift = src_bits - bit_depth;
597  const int mult_shift = bit_depth <= 14 ? 14 : 18;
598  const uint16_t mpeg_min = 16U << (bit_depth - 8);
599  const uint16_t mpeg_max_lum = 235U << (bit_depth - 8);
600  const uint16_t mpeg_max_chr = 240U << (bit_depth - 8);
601  const uint16_t jpeg_max = (1U << bit_depth) - 1;
602  uint16_t src_min, src_max_lum, src_max_chr;
603  uint16_t dst_min, dst_max_lum, dst_max_chr;
604  if (c->opts.src_range) {
605  src_min = 0;
606  src_max_lum = jpeg_max;
607  src_max_chr = jpeg_max;
608  dst_min = mpeg_min;
609  dst_max_lum = mpeg_max_lum;
610  dst_max_chr = mpeg_max_chr;
611  } else {
612  src_min = mpeg_min;
613  src_max_lum = mpeg_max_lum;
614  src_max_chr = mpeg_max_chr;
615  dst_min = 0;
616  dst_max_lum = jpeg_max;
617  dst_max_chr = jpeg_max;
618  }
619  solve_range_convert(src_min, src_max_lum, dst_min, dst_max_lum,
620  src_bits, src_shift, mult_shift,
621  &c->lumConvertRange_coeff, &c->lumConvertRange_offset);
622  solve_range_convert(src_min, src_max_chr, dst_min, dst_max_chr,
623  src_bits, src_shift, mult_shift,
624  &c->chrConvertRange_coeff, &c->chrConvertRange_offset);
625 }
626 
628 {
629  c->lumConvertRange = NULL;
630  c->chrConvertRange = NULL;
631  if (c->opts.src_range != c->opts.dst_range && !isAnyRGB(c->opts.dst_format) && c->dstBpc < 32) {
633  if (c->dstBpc <= 14) {
634  if (c->opts.src_range) {
635  c->lumConvertRange = lumRangeFromJpeg_c;
636  c->chrConvertRange = chrRangeFromJpeg_c;
637  } else {
638  c->lumConvertRange = lumRangeToJpeg_c;
639  c->chrConvertRange = chrRangeToJpeg_c;
640  }
641  } else {
642  if (c->opts.src_range) {
643  c->lumConvertRange = lumRangeFromJpeg16_c;
644  c->chrConvertRange = chrRangeFromJpeg16_c;
645  } else {
646  c->lumConvertRange = lumRangeToJpeg16_c;
647  c->chrConvertRange = chrRangeToJpeg16_c;
648  }
649  }
650 
651 #if ARCH_AARCH64
653 #elif ARCH_LOONGARCH64
655 #elif ARCH_RISCV
657 #elif ARCH_X86
659 #endif
660  }
661 }
662 
664 {
665  enum AVPixelFormat srcFormat = c->opts.src_format;
666 
668 
669  ff_sws_init_output_funcs(c, &c->yuv2plane1, &c->yuv2planeX,
670  &c->yuv2nv12cX, &c->yuv2packed1,
671  &c->yuv2packed2, &c->yuv2packedX, &c->yuv2anyX);
672 
673  ff_sws_init_input_funcs(c, &c->lumToYV12, &c->alpToYV12, &c->chrToYV12,
674  &c->readLumPlanar, &c->readAlpPlanar, &c->readChrPlanar);
675 
676  if (c->srcBpc == 8) {
677  if (c->dstBpc <= 14) {
678  c->hyScale = c->hcScale = hScale8To15_c;
679  if (c->opts.flags & SWS_FAST_BILINEAR) {
680  c->hyscale_fast = ff_hyscale_fast_c;
681  c->hcscale_fast = ff_hcscale_fast_c;
682  }
683  } else {
684  c->hyScale = c->hcScale = hScale8To19_c;
685  }
686  } else {
687  c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_c
688  : hScale16To15_c;
689  }
690 
692 
693  if (!(isGray(srcFormat) || isGray(c->opts.dst_format) ||
694  srcFormat == AV_PIX_FMT_MONOBLACK || srcFormat == AV_PIX_FMT_MONOWHITE))
695  c->needs_hcscale = 1;
696 }
697 
699 {
701 
702 #if ARCH_PPC
704 #elif ARCH_X86
706 #elif ARCH_AARCH64
708 #elif ARCH_ARM
710 #elif ARCH_LOONGARCH64
712 #elif ARCH_RISCV
714 #endif
715 }
716 
717 static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
718 {
719  if (!isALPHA(format))
720  src[3] = NULL;
721  if (!isPlanar(format)) {
722  src[3] = src[2] = NULL;
723 
724  if (!usePal(format))
725  src[1] = NULL;
726  }
727 }
728 
729 static int check_image_pointers(const uint8_t * const data[4], enum AVPixelFormat pix_fmt,
730  const int linesizes[4])
731 {
733  int i;
734 
735  av_assert2(desc);
736 
737  for (i = 0; i < 4; i++) {
738  int plane = desc->comp[i].plane;
739  if (!data[plane] || !linesizes[plane])
740  return 0;
741  }
742 
743  return 1;
744 }
745 
/* Convert a 12-bit-in-16 X'Y'Z' image to 16-bit RGB48 using the context's
 * precomputed gamma LUTs and 3x3 matrix (12-bit fixed point).  Byte order
 * follows the *source* format's endianness flag.
 * Note: in `a * x + b * y + c * z >> 12` the shift applies to the whole sum
 * ('>>' binds looser than '+'). */
static void xyz12Torgb48_c(const SwsInternal *c, uint8_t *dst, int dst_stride,
                           const uint8_t *src, int src_stride, int w, int h)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);

    for (int yp = 0; yp < h; yp++) {
        const uint16_t *src16 = (const uint16_t *) src;
        uint16_t *dst16 = (uint16_t *) dst;

        for (int xp = 0; xp < 3 * w; xp += 3) {
            int x, y, z, r, g, b;

            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                x = AV_RB16(src16 + xp + 0);
                y = AV_RB16(src16 + xp + 1);
                z = AV_RB16(src16 + xp + 2);
            } else {
                x = AV_RL16(src16 + xp + 0);
                y = AV_RL16(src16 + xp + 1);
                z = AV_RL16(src16 + xp + 2);
            }

            /* linearize: samples are 12 bits stored in the high bits of 16 */
            x = c->xyz2rgb.gamma.in[x >> 4];
            y = c->xyz2rgb.gamma.in[y >> 4];
            z = c->xyz2rgb.gamma.in[z >> 4];

            // convert from XYZlinear to sRGBlinear
            r = c->xyz2rgb.mat[0][0] * x +
                c->xyz2rgb.mat[0][1] * y +
                c->xyz2rgb.mat[0][2] * z >> 12;
            g = c->xyz2rgb.mat[1][0] * x +
                c->xyz2rgb.mat[1][1] * y +
                c->xyz2rgb.mat[1][2] * z >> 12;
            b = c->xyz2rgb.mat[2][0] * x +
                c->xyz2rgb.mat[2][1] * y +
                c->xyz2rgb.mat[2][2] * z >> 12;

            // limit values to 16-bit depth
            r = av_clip_uint16(r);
            g = av_clip_uint16(g);
            b = av_clip_uint16(b);

            // convert from sRGBlinear to RGB and scale from 12bit to 16bit
            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                AV_WB16(dst16 + xp + 0, c->xyz2rgb.gamma.out[r] << 4);
                AV_WB16(dst16 + xp + 1, c->xyz2rgb.gamma.out[g] << 4);
                AV_WB16(dst16 + xp + 2, c->xyz2rgb.gamma.out[b] << 4);
            } else {
                AV_WL16(dst16 + xp + 0, c->xyz2rgb.gamma.out[r] << 4);
                AV_WL16(dst16 + xp + 1, c->xyz2rgb.gamma.out[g] << 4);
                AV_WL16(dst16 + xp + 2, c->xyz2rgb.gamma.out[b] << 4);
            }
        }

        src += src_stride;
        dst += dst_stride;
    }
}
804 
805 static void rgb48Toxyz12_c(const SwsInternal *c, uint8_t *dst, int dst_stride,
806  const uint8_t *src, int src_stride, int w, int h)
807 {
808  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.dst_format);
809 
810  for (int yp = 0; yp < h; yp++) {
811  uint16_t *src16 = (uint16_t *) src;
812  uint16_t *dst16 = (uint16_t *) dst;
813 
814  for (int xp = 0; xp < 3 * w; xp += 3) {
815  int x, y, z, r, g, b;
816 
817  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
818  r = AV_RB16(src16 + xp + 0);
819  g = AV_RB16(src16 + xp + 1);
820  b = AV_RB16(src16 + xp + 2);
821  } else {
822  r = AV_RL16(src16 + xp + 0);
823  g = AV_RL16(src16 + xp + 1);
824  b = AV_RL16(src16 + xp + 2);
825  }
826 
827  r = c->rgb2xyz.gamma.in[r >> 4];
828  g = c->rgb2xyz.gamma.in[g >> 4];
829  b = c->rgb2xyz.gamma.in[b >> 4];
830 
831  // convert from sRGBlinear to XYZlinear
832  x = c->rgb2xyz.mat[0][0] * r +
833  c->rgb2xyz.mat[0][1] * g +
834  c->rgb2xyz.mat[0][2] * b >> 12;
835  y = c->rgb2xyz.mat[1][0] * r +
836  c->rgb2xyz.mat[1][1] * g +
837  c->rgb2xyz.mat[1][2] * b >> 12;
838  z = c->rgb2xyz.mat[2][0] * r +
839  c->rgb2xyz.mat[2][1] * g +
840  c->rgb2xyz.mat[2][2] * b >> 12;
841 
842  // limit values to 16-bit depth
843  x = av_clip_uint16(x);
844  y = av_clip_uint16(y);
845  z = av_clip_uint16(z);
846 
847  // convert from XYZlinear to X'Y'Z' and scale from 12bit to 16bit
848  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
849  AV_WB16(dst16 + xp + 0, c->rgb2xyz.gamma.out[x] << 4);
850  AV_WB16(dst16 + xp + 1, c->rgb2xyz.gamma.out[y] << 4);
851  AV_WB16(dst16 + xp + 2, c->rgb2xyz.gamma.out[z] << 4);
852  } else {
853  AV_WL16(dst16 + xp + 0, c->rgb2xyz.gamma.out[x] << 4);
854  AV_WL16(dst16 + xp + 1, c->rgb2xyz.gamma.out[y] << 4);
855  AV_WL16(dst16 + xp + 2, c->rgb2xyz.gamma.out[z] << 4);
856  }
857  }
858 
859  src += src_stride;
860  dst += dst_stride;
861  }
862 }
863 
865 {
866  c->xyz12Torgb48 = xyz12Torgb48_c;
867  c->rgb48Toxyz12 = rgb48Toxyz12_c;
868 
869 #if ARCH_AARCH64
871 #endif
872 }
873 
/* Rebuild the context's palette lookup tables (pal_yuv, pal_rgb) for the
 * 256 possible input indices, expanding the source format's packed bits
 * (or the supplied PAL8 palette) to 8-bit RGBA, converting to YUV with the
 * context's rgb2yuv matrix, and packing RGB in the destination's layout. */
void ff_update_palette(SwsInternal *c, const uint32_t *pal)
{
    uint32_t *rgb2yuv = c->input_rgb2yuv_table;

    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];

    for (int i = 0; i < 256; i++) {
        int r, g, b, y, u, v, a = 0xff;
        if (c->opts.src_format == AV_PIX_FMT_PAL8) {
            /* palette entries are packed ARGB */
            uint32_t p = pal[i];
            a = (p >> 24) & 0xFF;
            r = (p >> 16) & 0xFF;
            g = (p >> 8) & 0xFF;
            b = p & 0xFF;
        } else if (c->opts.src_format == AV_PIX_FMT_RGB8) {
            /* 3:3:2 — the multipliers expand each field to (roughly) 0..255 */
            r = ( i >> 5 ) * 36;
            g = ((i >> 2) & 7) * 36;
            b = ( i & 3) * 85;
        } else if (c->opts.src_format == AV_PIX_FMT_BGR8) {
            b = ( i >> 6 ) * 85;
            g = ((i >> 3) & 7) * 36;
            r = ( i & 7) * 36;
        } else if (c->opts.src_format == AV_PIX_FMT_RGB4_BYTE) {
            /* 1:2:1 in the low nibble */
            r = ( i >> 3 ) * 255;
            g = ((i >> 1) & 3) * 85;
            b = ( i & 1) * 255;
        } else if (c->opts.src_format == AV_PIX_FMT_GRAY8 || c->opts.src_format == AV_PIX_FMT_GRAY8A) {
            r = g = b = i;
        } else {
            av_assert1(c->opts.src_format == AV_PIX_FMT_BGR4_BYTE);
            b = ( i >> 3 ) * 255;
            g = ((i >> 1) & 3) * 85;
            r = ( i & 1) * 255;
        }

        /* NOTE(review): 33/257 are the rounding/bias constants for luma vs
         * chroma (chroma is offset to 128); derived from the matrix scaling —
         * confirm before changing. */
        y = av_clip_uint8((ry * r + gy * g + by * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
        u = av_clip_uint8((ru * r + gu * g + bu * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
        v = av_clip_uint8((rv * r + gv * g + bv * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);

        c->pal_yuv[i]= y + (u<<8) + (v<<16) + ((unsigned)a<<24);

        /* pack RGB bytes in the destination format's memory order */
        switch (c->opts.dst_format) {
        case AV_PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
        case AV_PIX_FMT_RGB24:
#endif
            c->pal_rgb[i]= r + (g<<8) + (b<<16) + ((unsigned)a<<24);
            break;
        case AV_PIX_FMT_BGR32_1:
#if HAVE_BIGENDIAN
        case AV_PIX_FMT_BGR24:
#endif
            c->pal_rgb[i]= a + (r<<8) + (g<<16) + ((unsigned)b<<24);
            break;
        case AV_PIX_FMT_RGB32_1:
#if HAVE_BIGENDIAN
        case AV_PIX_FMT_RGB24:
#endif
            c->pal_rgb[i]= a + (b<<8) + (g<<16) + ((unsigned)r<<24);
            break;
        case AV_PIX_FMT_GBRP:
        case AV_PIX_FMT_GBRAP:
#if HAVE_BIGENDIAN
            c->pal_rgb[i]= a + (r<<8) + (b<<16) + ((unsigned)g<<24);
#else
            c->pal_rgb[i]= g + (b<<8) + (r<<16) + ((unsigned)a<<24);
#endif
            break;
        case AV_PIX_FMT_RGB32:
#if !HAVE_BIGENDIAN
        case AV_PIX_FMT_BGR24:
#endif
        default:
            c->pal_rgb[i]= b + (g<<8) + (r<<16) + ((unsigned)a<<24);
        }
    }
}
953 
954 static int scale_internal(SwsContext *sws,
955  const uint8_t * const srcSlice[], const int srcStride[],
956  int srcSliceY, int srcSliceH,
957  uint8_t *const dstSlice[], const int dstStride[],
958  int dstSliceY, int dstSliceH);
959 
961  const uint8_t * const srcSlice[], const int srcStride[],
962  int srcSliceY, int srcSliceH,
963  uint8_t * const dstSlice[], const int dstStride[],
964  int dstSliceY, int dstSliceH)
965 {
966  int ret = scale_internal(c->cascaded_context[0],
967  srcSlice, srcStride, srcSliceY, srcSliceH,
968  c->cascaded_tmp[0], c->cascaded_tmpStride[0], 0, c->opts.src_h);
969 
970  if (ret < 0)
971  return ret;
972 
973  if (c->cascaded_context[2])
974  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
975  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
976  c->cascaded_tmp[1], c->cascaded_tmpStride[1], 0, c->opts.dst_h);
977  else
978  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
979  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
980  dstSlice, dstStride, dstSliceY, dstSliceH);
981 
982  if (ret < 0)
983  return ret;
984 
985  if (c->cascaded_context[2]) {
986  const int dstY1 = sws_internal(c->cascaded_context[1])->dstY;
987  ret = scale_internal(c->cascaded_context[2], (const uint8_t * const *)c->cascaded_tmp[1],
988  c->cascaded_tmpStride[1], dstY1 - ret, dstY1,
989  dstSlice, dstStride, dstSliceY, dstSliceH);
990  }
991  return ret;
992 }
993 
995  const uint8_t * const srcSlice[], const int srcStride[],
996  int srcSliceY, int srcSliceH,
997  uint8_t * const dstSlice[], const int dstStride[],
998  int dstSliceY, int dstSliceH)
999 {
1000  const int dstH0 = c->cascaded_context[0]->dst_h;
1001  int ret = scale_internal(c->cascaded_context[0],
1002  srcSlice, srcStride, srcSliceY, srcSliceH,
1003  c->cascaded_tmp[0], c->cascaded_tmpStride[0],
1004  0, dstH0);
1005  if (ret < 0)
1006  return ret;
1007  ret = scale_internal(c->cascaded_context[1],
1008  (const uint8_t * const * )c->cascaded_tmp[0], c->cascaded_tmpStride[0],
1009  0, dstH0, dstSlice, dstStride, dstSliceY, dstSliceH);
1010  return ret;
1011 }
1012 
1014  const uint8_t * const srcSlice[], const int srcStride[],
1015  int srcSliceY, int srcSliceH,
1016  uint8_t *const dstSlice[], const int dstStride[],
1017  int dstSliceY, int dstSliceH)
1018 {
1020  const int scale_dst = dstSliceY > 0 || dstSliceH < sws->dst_h;
1021  const int frame_start = scale_dst || !c->sliceDir;
1022  int i, ret;
1023  const uint8_t *src2[4];
1024  uint8_t *dst2[4];
1025  int macro_height_src = isBayer(sws->src_format) ? 2 : (1 << c->chrSrcVSubSample);
1026  int macro_height_dst = isBayer(sws->dst_format) ? 2 : (1 << c->chrDstVSubSample);
1027  // copy strides, so they can safely be modified
1028  int srcStride2[4];
1029  int dstStride2[4];
1030  int srcSliceY_internal = srcSliceY;
1031 
1032  if (!srcStride || !dstStride || !dstSlice || !srcSlice) {
1033  av_log(c, AV_LOG_ERROR, "One of the input parameters to sws_scale() is NULL, please check the calling code\n");
1034  return AVERROR(EINVAL);
1035  }
1036 
1037  if ((srcSliceY & (macro_height_src - 1)) ||
1038  ((srcSliceH & (macro_height_src - 1)) && srcSliceY + srcSliceH != sws->src_h) ||
1039  srcSliceY + srcSliceH > sws->src_h ||
1040  (isBayer(sws->src_format) && srcSliceH <= 1)) {
1041  av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", srcSliceY, srcSliceH);
1042  return AVERROR(EINVAL);
1043  }
1044 
1045  if ((dstSliceY & (macro_height_dst - 1)) ||
1046  ((dstSliceH & (macro_height_dst - 1)) && dstSliceY + dstSliceH != sws->dst_h) ||
1047  dstSliceY + dstSliceH > sws->dst_h) {
1048  av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", dstSliceY, dstSliceH);
1049  return AVERROR(EINVAL);
1050  }
1051 
1052  if (!check_image_pointers(srcSlice, sws->src_format, srcStride)) {
1053  av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
1054  return AVERROR(EINVAL);
1055  }
1056  if (!check_image_pointers((const uint8_t* const*)dstSlice, sws->dst_format, dstStride)) {
1057  av_log(c, AV_LOG_ERROR, "bad dst image pointers\n");
1058  return AVERROR(EINVAL);
1059  }
1060 
1061  // do not mess up sliceDir if we have a "trailing" 0-size slice
1062  if (srcSliceH == 0)
1063  return 0;
1064 
1065  if (sws->gamma_flag && c->cascaded_context[0])
1066  return scale_gamma(c, srcSlice, srcStride, srcSliceY, srcSliceH,
1067  dstSlice, dstStride, dstSliceY, dstSliceH);
1068 
1069  if (c->cascaded_context[0] && srcSliceY == 0 && srcSliceH == c->cascaded_context[0]->src_h)
1070  return scale_cascaded(c, srcSlice, srcStride, srcSliceY, srcSliceH,
1071  dstSlice, dstStride, dstSliceY, dstSliceH);
1072 
1073  if (!srcSliceY && (sws->flags & SWS_BITEXACT) && sws->dither == SWS_DITHER_ED && c->dither_error[0])
1074  for (i = 0; i < 4; i++)
1075  memset(c->dither_error[i], 0, sizeof(c->dither_error[0][0]) * (sws->dst_w+2));
1076 
1077  if (usePal(sws->src_format))
1078  ff_update_palette(c, (const uint32_t *)srcSlice[1]);
1079 
1080  memcpy(src2, srcSlice, sizeof(src2));
1081  memcpy(dst2, dstSlice, sizeof(dst2));
1082  memcpy(srcStride2, srcStride, sizeof(srcStride2));
1083  memcpy(dstStride2, dstStride, sizeof(dstStride2));
1084 
1085  if (frame_start && !scale_dst) {
1086  if (srcSliceY != 0 && srcSliceY + srcSliceH != sws->src_h) {
1087  av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
1088  return AVERROR(EINVAL);
1089  }
1090 
1091  c->sliceDir = (srcSliceY == 0) ? 1 : -1;
1092  } else if (scale_dst)
1093  c->sliceDir = 1;
1094 
1095  if (c->src0Alpha && !c->dst0Alpha && isALPHA(sws->dst_format)) {
1096  uint8_t *base;
1097  int x,y;
1098 
1099  av_fast_malloc(&c->rgb0_scratch, &c->rgb0_scratch_allocated,
1100  FFABS(srcStride[0]) * srcSliceH + 32);
1101  if (!c->rgb0_scratch)
1102  return AVERROR(ENOMEM);
1103 
1104  base = srcStride[0] < 0 ? c->rgb0_scratch - srcStride[0] * (srcSliceH-1) :
1105  c->rgb0_scratch;
1106  for (y=0; y<srcSliceH; y++){
1107  memcpy(base + srcStride[0]*y, src2[0] + srcStride[0]*y, 4*sws->src_w);
1108  for (x=c->src0Alpha-1; x<4*sws->src_w; x+=4) {
1109  base[ srcStride[0]*y + x] = 0xFF;
1110  }
1111  }
1112  src2[0] = base;
1113  }
1114 
1115  if (c->srcXYZ && !(c->dstXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
1116  uint8_t *base;
1117 
1118  av_fast_malloc(&c->xyz_scratch, &c->xyz_scratch_allocated,
1119  FFABS(srcStride[0]) * srcSliceH + 32);
1120  if (!c->xyz_scratch)
1121  return AVERROR(ENOMEM);
1122 
1123  base = srcStride[0] < 0 ? c->xyz_scratch - srcStride[0] * (srcSliceH-1) :
1124  c->xyz_scratch;
1125 
1126  c->xyz12Torgb48(c, base, srcStride[0], src2[0], srcStride[0], sws->src_w, srcSliceH);
1127  src2[0] = base;
1128  }
1129 
1130  if (c->sliceDir != 1) {
1131  // slices go from bottom to top => we flip the image internally
1132  for (i=0; i<4; i++) {
1133  srcStride2[i] *= -1;
1134  dstStride2[i] *= -1;
1135  }
1136 
1137  src2[0] += (srcSliceH - 1) * srcStride[0];
1138  if (!usePal(sws->src_format))
1139  src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
1140  src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
1141  src2[3] += (srcSliceH - 1) * srcStride[3];
1142  dst2[0] += ( sws->dst_h - 1) * dstStride[0];
1143  dst2[1] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[1];
1144  dst2[2] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[2];
1145  dst2[3] += ( sws->dst_h - 1) * dstStride[3];
1146 
1147  srcSliceY_internal = sws->src_h-srcSliceY-srcSliceH;
1148  }
1150  reset_ptr((void*)dst2, sws->dst_format);
1151 
1152  if (c->convert_unscaled) {
1153  int offset = srcSliceY_internal;
1154  int slice_h = srcSliceH;
1155 
1156  // for dst slice scaling, offset the pointers to match the unscaled API
1157  if (scale_dst) {
1158  av_assert0(offset == 0);
1159  for (i = 0; i < 4 && src2[i]; i++) {
1160  if (!src2[i] || (i > 0 && usePal(sws->src_format)))
1161  break;
1162  src2[i] += (dstSliceY >> ((i == 1 || i == 2) ? c->chrSrcVSubSample : 0)) * srcStride2[i];
1163  }
1164 
1165  for (i = 0; i < 4 && dst2[i]; i++) {
1166  if (!dst2[i] || (i > 0 && usePal(sws->dst_format)))
1167  break;
1168  dst2[i] -= (dstSliceY >> ((i == 1 || i == 2) ? c->chrDstVSubSample : 0)) * dstStride2[i];
1169  }
1170  offset = dstSliceY;
1171  slice_h = dstSliceH;
1172  }
1173 
1174  ret = c->convert_unscaled(c, src2, srcStride2, offset, slice_h,
1175  dst2, dstStride2);
1176  if (scale_dst)
1177  dst2[0] += dstSliceY * dstStride2[0];
1178  } else {
1179  ret = ff_swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH,
1180  dst2, dstStride2, dstSliceY, dstSliceH);
1181  }
1182 
1183  if (c->dstXYZ && !(c->srcXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
1184  uint8_t *dst;
1185 
1186  if (scale_dst) {
1187  dst = dst2[0];
1188  } else {
1189  int dstY = c->dstY ? c->dstY : srcSliceY + srcSliceH;
1190 
1191  av_assert0(dstY >= ret);
1192  av_assert0(ret >= 0);
1193  av_assert0(sws->dst_h >= dstY);
1194  dst = dst2[0] + (dstY - ret) * dstStride2[0];
1195  }
1196 
1197  /* replace on the same data */
1198  c->rgb48Toxyz12(c, dst, dstStride2[0], dst, dstStride2[0], sws->dst_w, ret);
1199  }
1200 
1201  /* reset slice direction at end of frame */
1202  if ((srcSliceY_internal + srcSliceH == sws->src_h) || scale_dst)
1203  c->sliceDir = 0;
1204 
1205  return ret;
1206 }
1207 
1209 {
1211  av_frame_unref(c->frame_src);
1212  av_frame_unref(c->frame_dst);
1213  c->src_ranges.nb_ranges = 0;
1214 }
1215 
1217 {
1219  int ret, allocated = 0;
1220 
1221  ret = av_frame_ref(c->frame_src, src);
1222  if (ret < 0)
1223  return ret;
1224 
1225  if (!dst->buf[0]) {
1226  dst->width = sws->dst_w;
1227  dst->height = sws->dst_h;
1228  dst->format = sws->dst_format;
1229 
1230  ret = av_frame_get_buffer(dst, 0);
1231  if (ret < 0)
1232  return ret;
1233  allocated = 1;
1234  }
1235 
1236  ret = av_frame_ref(c->frame_dst, dst);
1237  if (ret < 0) {
1238  if (allocated)
1240 
1241  return ret;
1242  }
1243 
1244  return 0;
1245 }
1246 
1248  unsigned int slice_height)
1249 {
1251  int ret;
1252 
1253  ret = ff_range_add(&c->src_ranges, slice_start, slice_height);
1254  if (ret < 0)
1255  return ret;
1256 
1257  return 0;
1258 }
1259 
1261 {
1263  if (c->slice_ctx)
1264  return sws_internal(c->slice_ctx[0])->dst_slice_align;
1265 
1266  return c->dst_slice_align;
1267 }
1268 
1270  unsigned int slice_height)
1271 {
1273  unsigned int align = sws_receive_slice_alignment(sws);
1274  uint8_t *dst[4];
1275 
1276  /* wait until complete input has been received */
1277  if (!(c->src_ranges.nb_ranges == 1 &&
1278  c->src_ranges.ranges[0].start == 0 &&
1279  c->src_ranges.ranges[0].len == sws->src_h))
1280  return AVERROR(EAGAIN);
1281 
1282  if ((slice_start > 0 || slice_height < sws->dst_h) &&
1283  (slice_start % align || slice_height % align)) {
1285  "Incorrectly aligned output: %u/%u not multiples of %u\n",
1286  slice_start, slice_height, align);
1287  return AVERROR(EINVAL);
1288  }
1289 
1290  if (c->slicethread) {
1291  int nb_jobs = c->nb_slice_ctx;
1292  int ret = 0;
1293 
1294  if (c->slice_ctx[0]->dither == SWS_DITHER_ED)
1295  nb_jobs = 1;
1296 
1297  c->dst_slice_start = slice_start;
1298  c->dst_slice_height = slice_height;
1299 
1300  avpriv_slicethread_execute(c->slicethread, nb_jobs, 0);
1301 
1302  for (int i = 0; i < c->nb_slice_ctx; i++) {
1303  if (c->slice_err[i] < 0) {
1304  ret = c->slice_err[i];
1305  break;
1306  }
1307  }
1308 
1309  memset(c->slice_err, 0, c->nb_slice_ctx * sizeof(*c->slice_err));
1310 
1311  return ret;
1312  }
1313 
1314  for (int i = 0; i < FF_ARRAY_ELEMS(dst); i++) {
1315  ptrdiff_t offset = c->frame_dst->linesize[i] * (ptrdiff_t)(slice_start >> c->chrDstVSubSample);
1316  dst[i] = FF_PTR_ADD(c->frame_dst->data[i], offset);
1317  }
1318 
1319  return scale_internal(sws, (const uint8_t * const *)c->frame_src->data,
1320  c->frame_src->linesize, 0, sws->src_h,
1321  dst, c->frame_dst->linesize, slice_start, slice_height);
1322 }
1323 
1325 {
1326  SwsImg img = {0};
1327 
1328  img.frame_ptr = frame;
1329  img.fmt = frame->format;
1330  for (int i = 0; i < 4; i++) {
1331  img.data[i] = frame->data[i];
1332  img.linesize[i] = frame->linesize[i];
1333  }
1334 
1335  if (!(frame->flags & AV_FRAME_FLAG_INTERLACED)) {
1336  av_assert1(!field);
1337  return img;
1338  }
1339 
1340  if (field == FIELD_BOTTOM) {
1341  /* Odd rows, offset by one line */
1343  for (int i = 0; i < 4; i++) {
1344  if (img.data[i])
1345  img.data[i] += img.linesize[i];
1346  if (desc->flags & AV_PIX_FMT_FLAG_PAL)
1347  break;
1348  }
1349  }
1350 
1351  /* Take only every second line */
1352  for (int i = 0; i < 4; i++)
1353  img.linesize[i] <<= 1;
1354 
1355  return img;
1356 }
1357 
1358 /* Subset of av_frame_ref() that only references (video) data buffers */
1359 static int frame_ref(AVFrame *dst, const AVFrame *src)
1360 {
1361  /* ref the buffers */
1362  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
1363  if (!src->buf[i])
1364  continue;
1365  dst->buf[i] = av_buffer_ref(src->buf[i]);
1366  if (!dst->buf[i])
1367  return AVERROR(ENOMEM);
1368  }
1369 
1370  memcpy(dst->data, src->data, sizeof(src->data));
1371  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
1372  return 0;
1373 }
1374 
1376 {
1377  int ret;
1379  if (!src || !dst)
1380  return AVERROR(EINVAL);
1381 
1382  if (c->frame_src) {
1383  /* Context has been initialized with explicit values, fall back to
1384  * legacy API */
1385  ret = sws_frame_start(sws, dst, src);
1386  if (ret < 0)
1387  return ret;
1388 
1389  ret = sws_send_slice(sws, 0, src->height);
1390  if (ret >= 0)
1391  ret = sws_receive_slice(sws, 0, dst->height);
1392 
1393  sws_frame_end(sws);
1394 
1395  return ret;
1396  }
1397 
1398  ret = sws_frame_setup(sws, dst, src);
1399  if (ret < 0)
1400  return ret;
1401 
1402  if (!src->data[0])
1403  return 0;
1404 
1405  if (c->graph[FIELD_TOP]->noop &&
1406  (!c->graph[FIELD_BOTTOM] || c->graph[FIELD_BOTTOM]->noop) &&
1407  src->buf[0] && !dst->buf[0] && !dst->data[0])
1408  {
1409  /* Lightweight refcopy */
1410  ret = frame_ref(dst, src);
1411  if (ret < 0)
1412  return ret;
1413  } else {
1414  if (!dst->data[0]) {
1415  ret = av_frame_get_buffer(dst, 0);
1416  if (ret < 0)
1417  return ret;
1418  }
1419 
1420  for (int field = 0; field < 2; field++) {
1421  SwsGraph *graph = c->graph[field];
1424  ff_sws_graph_run(graph, &output, &input);
1425  if (!graph->dst.interlaced)
1426  break;
1427  }
1428  }
1429 
1430  return 0;
1431 }
1432 
1434 {
1435 #define VALIDATE(field, min, max) \
1436  if (ctx->field < min || ctx->field > max) { \
1437  av_log(ctx, AV_LOG_ERROR, "'%s' (%d) out of range [%d, %d]\n", \
1438  #field, (int) ctx->field, min, max); \
1439  return AVERROR(EINVAL); \
1440  }
1441 
1442  VALIDATE(threads, 0, SWS_MAX_THREADS);
1443  VALIDATE(dither, 0, SWS_DITHER_NB - 1)
1444  VALIDATE(alpha_blend, 0, SWS_ALPHA_BLEND_NB - 1)
1445  return 0;
1446 }
1447 
1449 {
1451  const char *err_msg;
1452  int ret;
1453 
1454  if (!src || !dst)
1455  return AVERROR(EINVAL);
1456  if ((ret = validate_params(ctx)) < 0)
1457  return ret;
1458 
1459  /* For now, if a single frame has a context, then both need a context */
1460  if (!!src->hw_frames_ctx != !!dst->hw_frames_ctx) {
1461  return AVERROR(ENOTSUP);
1462  } else if (!!src->hw_frames_ctx) {
1463  /* Both hardware frames must already be allocated */
1464  if (!src->data[0] || !dst->data[0])
1465  return AVERROR(EINVAL);
1466 
1467  AVHWFramesContext *src_hwfc, *dst_hwfc;
1468  src_hwfc = (AVHWFramesContext *)src->hw_frames_ctx->data;
1469  dst_hwfc = (AVHWFramesContext *)dst->hw_frames_ctx->data;
1470 
1471  /* Both frames must live on the same device */
1472  if (src_hwfc->device_ref->data != dst_hwfc->device_ref->data)
1473  return AVERROR(EINVAL);
1474 
1475  /* Only Vulkan devices are supported */
1476  AVHWDeviceContext *dev_ctx;
1477  dev_ctx = (AVHWDeviceContext *)src_hwfc->device_ref->data;
1478  if (dev_ctx->type != AV_HWDEVICE_TYPE_VULKAN)
1479  return AVERROR(ENOTSUP);
1480 
1481 #if CONFIG_VULKAN
1482  ret = ff_sws_vk_init(ctx, src_hwfc->device_ref);
1483  if (ret < 0)
1484  return ret;
1485 #endif
1486  }
1487 
1488  for (int field = 0; field < 2; field++) {
1489  SwsFormat src_fmt = ff_fmt_from_frame(src, field);
1490  SwsFormat dst_fmt = ff_fmt_from_frame(dst, field);
1491  int src_ok, dst_ok;
1492 
1493  if ((src->flags ^ dst->flags) & AV_FRAME_FLAG_INTERLACED) {
1494  err_msg = "Cannot convert interlaced to progressive frames or vice versa.\n";
1495  ret = AVERROR(EINVAL);
1496  goto fail;
1497  }
1498 
1499  src_ok = ff_test_fmt(&src_fmt, 0);
1500  dst_ok = ff_test_fmt(&dst_fmt, 1);
1501  if ((!src_ok || !dst_ok) && !ff_props_equal(&src_fmt, &dst_fmt)) {
1502  err_msg = src_ok ? "Unsupported output" : "Unsupported input";
1503  ret = AVERROR(ENOTSUP);
1504  goto fail;
1505  }
1506 
1507  ret = ff_sws_graph_reinit(ctx, &dst_fmt, &src_fmt, field, &s->graph[field]);
1508  if (ret < 0) {
1509  err_msg = "Failed initializing scaling graph";
1510  goto fail;
1511  }
1512 
1513  if (s->graph[field]->incomplete && ctx->flags & SWS_STRICT) {
1514  err_msg = "Incomplete scaling graph";
1515  ret = AVERROR(EINVAL);
1516  goto fail;
1517  }
1518 
1519  if (!src_fmt.interlaced) {
1520  ff_sws_graph_free(&s->graph[FIELD_BOTTOM]);
1521  break;
1522  }
1523 
1524  continue;
1525 
1526  fail:
1527  av_log(ctx, AV_LOG_ERROR, "%s (%s): fmt:%s csp:%s prim:%s trc:%s ->"
1528  " fmt:%s csp:%s prim:%s trc:%s\n",
1529  err_msg, av_err2str(ret),
1534 
1535  for (int i = 0; i < FF_ARRAY_ELEMS(s->graph); i++)
1536  ff_sws_graph_free(&s->graph[i]);
1537 
1538  return ret;
1539  }
1540 
1541  return 0;
1542 }
1543 
1544 /**
1545  * swscale wrapper, so we don't need to export the SwsContext.
1546  * Assumes planar YUV to be in YUV order instead of YVU.
1547  */
1549  const uint8_t * const srcSlice[],
1550  const int srcStride[], int srcSliceY,
1551  int srcSliceH, uint8_t *const dst[],
1552  const int dstStride[])
1553 {
1555  if (c->nb_slice_ctx) {
1556  sws = c->slice_ctx[0];
1557  c = sws_internal(sws);
1558  }
1559 
1560  return scale_internal(sws, srcSlice, srcStride, srcSliceY, srcSliceH,
1561  dst, dstStride, 0, sws->dst_h);
1562 }
1563 
1564 void ff_sws_slice_worker(void *priv, int jobnr, int threadnr,
1565  int nb_jobs, int nb_threads)
1566 {
1567  SwsInternal *parent = priv;
1568  SwsContext *sws = parent->slice_ctx[threadnr];
1570 
1571  const int slice_height = FFALIGN(FFMAX((parent->dst_slice_height + nb_jobs - 1) / nb_jobs, 1),
1572  c->dst_slice_align);
1573  const int slice_start = jobnr * slice_height;
1574  const int slice_end = FFMIN((jobnr + 1) * slice_height, parent->dst_slice_height);
1575  int err = 0;
1576 
1577  if (slice_end > slice_start) {
1578  uint8_t *dst[4] = { NULL };
1579 
1580  for (int i = 0; i < FF_ARRAY_ELEMS(dst) && parent->frame_dst->data[i]; i++) {
1581  const int vshift = (i == 1 || i == 2) ? c->chrDstVSubSample : 0;
1582  const ptrdiff_t offset = parent->frame_dst->linesize[i] *
1583  (ptrdiff_t)((slice_start + parent->dst_slice_start) >> vshift);
1584 
1585  dst[i] = parent->frame_dst->data[i] + offset;
1586  }
1587 
1588  err = scale_internal(sws, (const uint8_t * const *)parent->frame_src->data,
1589  parent->frame_src->linesize, 0, sws->src_h,
1590  dst, parent->frame_dst->linesize,
1592  }
1593 
1594  parent->slice_err[threadnr] = err;
1595 }
sws_init_swscale
static av_cold void sws_init_swscale(SwsInternal *c)
Definition: swscale.c:663
isBayer
static av_always_inline int isBayer(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:861
flags
const SwsFlags flags[]
Definition: swscale.c:61
ff_test_fmt
int ff_test_fmt(const SwsFormat *fmt, int output)
Definition: format.c:600
_dst
uint8_t * _dst
Definition: dsp.h:56
yuv2planar1_fn
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
Definition: swscale_internal.h:121
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:155
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
SwsPlane::sliceH
int sliceH
number of lines
Definition: swscale_internal.h:1107
ff_props_equal
static int ff_props_equal(const SwsFormat *fmt1, const SwsFormat *fmt2)
Definition: format.h:119
isPacked
static av_always_inline int isPacked(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:906
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
mem_internal.h
ff_fmt_from_frame
SwsFormat ff_fmt_from_frame(const AVFrame *frame, int field)
This function also sanitizes and strips the input data, removing irrelevant fields for certain format...
Definition: format.c:312
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:513
SwsFormat::interlaced
int interlaced
Definition: format.h:79
lumRangeToJpeg16_c
static void lumRangeToJpeg16_c(int16_t *_dst, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:237
lumRangeToJpeg_c
static void lumRangeToJpeg_c(int16_t *dst, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:189
ff_sws_init_range_convert_aarch64
av_cold void ff_sws_init_range_convert_aarch64(SwsInternal *c)
Definition: swscale.c:314
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1916
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
SwsContext::src_w
int src_w
Deprecated frame property overrides, for the legacy API only.
Definition: swscale.h:237
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_sws_graph_reinit
int ff_sws_graph_reinit(SwsContext *ctx, const SwsFormat *dst, const SwsFormat *src, int field, SwsGraph **out_graph)
Wrapper around ff_sws_graph_create() that reuses the existing graph if the format is compatible.
Definition: graph.c:821
ff_rotate_slice
int ff_rotate_slice(SwsSlice *s, int lum, int chr)
Definition: slice.c:120
int64_t
long long int64_t
Definition: coverity.c:34
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:226
AV_PIX_FMT_FLAG_FLOAT
#define AV_PIX_FMT_FLAG_FLOAT
The pixel format contains IEEE-754 floating point values.
Definition: pixdesc.h:158
SwsSlice::plane
SwsPlane plane[MAX_SLICE_PLANES]
color planes
Definition: swscale_internal.h:1125
avpriv_slicethread_execute
void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main)
Execute slice threading.
Definition: slicethread.c:271
ff_sws_init_range_convert_loongarch
av_cold void ff_sws_init_range_convert_loongarch(SwsInternal *c)
Definition: swscale_init_loongarch.c:27
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
pixdesc.h
RV_IDX
#define RV_IDX
Definition: swscale_internal.h:468
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
isGray
static av_always_inline int isGray(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:806
RU_IDX
#define RU_IDX
Definition: swscale_internal.h:465
SWS_BITEXACT
@ SWS_BITEXACT
Definition: swscale.h:158
b
#define b
Definition: input.c:42
SwsFilterDescriptor
Struct which holds all necessary data for processing a slice.
Definition: swscale_internal.h:1132
yuv2planeX
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: swscale_ppc_template.c:84
data
const char data[16]
Definition: mxf.c:149
GV_IDX
#define GV_IDX
Definition: swscale_internal.h:469
AV_PIX_FMT_MONOWHITE
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:82
rgb2yuv
static const char rgb2yuv[]
Definition: vf_scale_vulkan.c:84
BV_IDX
#define BV_IDX
Definition: swscale_internal.h:470
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:512
SwsContext::flags
unsigned flags
Bitmask of SWS_*.
Definition: swscale.h:204
base
uint8_t base
Definition: vp3data.h:128
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
sws_receive_slice
int sws_receive_slice(SwsContext *sws, unsigned int slice_start, unsigned int slice_height)
Request a horizontal slice of the output data to be written into the frame previously provided to sws...
Definition: swscale.c:1269
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
ff_sws_init_swscale_riscv
av_cold void ff_sws_init_swscale_riscv(SwsInternal *c)
Definition: swscale.c:74
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_get_cpu_flags
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
Definition: cpu.c:109
DEBUG_BUFFERS
#define DEBUG_BUFFERS(...)
Definition: swscale.c:259
SwsImg
Represents a view into a single field of frame data.
Definition: graph.h:35
bit_depth
static void bit_depth(AudioStatsContext *s, const uint64_t *const mask, uint8_t *depth)
Definition: af_astats.c:246
cpu_flags
static atomic_int cpu_flags
Definition: cpu.c:56
hScale16To15_c
static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:99
_src
uint8_t ptrdiff_t const uint8_t * _src
Definition: dsp.h:56
SwsInternal::frame_dst
AVFrame * frame_dst
Definition: swscale_internal.h:371
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
AV_HWDEVICE_TYPE_VULKAN
@ AV_HWDEVICE_TYPE_VULKAN
Definition: hwcontext.h:39
SWS_FAST_BILINEAR
@ SWS_FAST_BILINEAR
Scaler selection options.
Definition: swscale.h:100
is16BPS
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:746
ff_sws_init_input_funcs
void ff_sws_init_input_funcs(SwsInternal *c, planar1_YV12_fn *lumToYV12, planar1_YV12_fn *alpToYV12, planar2_YV12_fn *chrToYV12, planarX_YV12_fn *readLumPlanar, planarX_YV12_fn *readAlpPlanar, planarX2_YV12_fn *readChrPlanar)
validate_params
static int validate_params(SwsContext *ctx)
Definition: swscale.c:1433
chrRangeToJpeg16_c
static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:211
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1693
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3856
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
chrRangeFromJpeg_c
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:177
SWS_MAX_THREADS
#define SWS_MAX_THREADS
Definition: swscale_internal.h:52
fail
#define fail()
Definition: checkasm.h:218
chrRangeFromJpeg16_c
static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:225
SwsInternal::frame_src
AVFrame * frame_src
Definition: swscale_internal.h:370
sws_frame_setup
int sws_frame_setup(SwsContext *ctx, const AVFrame *dst, const AVFrame *src)
Like sws_scale_frame, but without actually scaling.
Definition: swscale.c:1448
ff_sws_init_xyzdsp_aarch64
av_cold void ff_sws_init_xyzdsp_aarch64(SwsInternal *c)
Definition: swscale.c:339
val
static double val(void *priv, double ch)
Definition: aeval.c:77
SWS_ALPHA_BLEND_NB
@ SWS_ALPHA_BLEND_NB
Definition: swscale.h:92
isNBPS
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:760
init_range_convert_constants
static void init_range_convert_constants(SwsInternal *c)
Definition: swscale.c:592
SwsColor::trc
enum AVColorTransferCharacteristic trc
Definition: format.h:62
AVHWDeviceContext
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:63
AV_PIX_FMT_BGR8
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:90
avassert.h
hScale8To19_c
static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:144
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
yuv2packed2_fn
void(* yuv2packed2_fn)(SwsInternal *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
Definition: swscale_internal.h:227
SwsContext::dither
SwsDither dither
Dither mode.
Definition: swscale.h:219
SwsInternal::slice_err
int * slice_err
Definition: swscale_internal.h:339
emms_c
#define emms_c()
Definition: emms.h:63
ff_sws_vk_init
int ff_sws_vk_init(SwsContext *sws, AVBufferRef *dev_ref)
Definition: ops.c:42
intreadwrite.h
dither
static const uint16_t dither[8][8]
Definition: vf_gradfun.c:46
SwsInternal::slice_ctx
SwsContext ** slice_ctx
Definition: swscale_internal.h:338
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_update_palette
void ff_update_palette(SwsInternal *c, const uint32_t *pal)
Definition: swscale.c:874
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
ff_sws_init_swscale_arm
av_cold void ff_sws_init_swscale_arm(SwsInternal *c)
Definition: swscale.c:33
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1415
g
const char * g
Definition: vf_curves.c:128
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
ops.h
SwsSlice::width
int width
Slice line width.
Definition: swscale_internal.h:1119
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
VALIDATE
#define VALIDATE(field, min, max)
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
chrRangeToJpeg_c
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:163
ff_hcscale_fast_c
void ff_hcscale_fast_c(SwsInternal *c, int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:38
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_sws_init_range_convert_riscv
av_cold void ff_sws_init_range_convert_riscv(SwsInternal *c)
Definition: swscale.c:29
GY_IDX
#define GY_IDX
Definition: swscale_internal.h:463
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:514
sws_frame_end
void sws_frame_end(SwsContext *sws)
Finish the scaling process for a pair of source/destination frames previously submitted with sws_fram...
Definition: swscale.c:1208
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_sws_init_range_convert_x86
av_cold void ff_sws_init_range_convert_x86(SwsInternal *c)
Definition: swscale.c:474
AV_PIX_FMT_GRAY8A
@ AV_PIX_FMT_GRAY8A
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:143
scale_internal
static int scale_internal(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:1013
fillPlane
static av_always_inline void fillPlane(uint8_t *plane, int stride, int width, int height, int y, uint8_t val)
Definition: swscale.c:58
FIELD_BOTTOM
@ FIELD_BOTTOM
Definition: format.h:57
NULL
#define NULL
Definition: coverity.c:32
rgb48Toxyz12_c
static void rgb48Toxyz12_c(const SwsInternal *c, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int w, int h)
Definition: swscale.c:805
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
SwsPlane::available_lines
int available_lines
max number of lines that can be hold by this plane
Definition: swscale_internal.h:1105
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
SwsContext::gamma_flag
int gamma_flag
Use gamma correct scaling.
Definition: swscale.h:229
AVHWFramesContext::device_ref
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
Definition: hwcontext.h:129
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
V
#define V
Definition: avdct.c:32
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
av_color_primaries_name
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:3790
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
hScale8To15_c
static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:128
ff_sws_init_range_convert
av_cold void ff_sws_init_range_convert(SwsInternal *c)
Definition: swscale.c:627
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
hScale16To19_c
static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:69
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
SwsInternal::dstY
int dstY
Last destination vertical line output from last slice.
Definition: swscale_internal.h:453
ff_sws_init_xyzdsp
av_cold void ff_sws_init_xyzdsp(SwsInternal *c)
Definition: swscale.c:864
AV_PIX_FMT_BGR4_BYTE
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:92
ff_range_add
int ff_range_add(RangeList *r, unsigned int start, unsigned int len)
Definition: utils.c:2408
attribute_align_arg
#define attribute_align_arg
Definition: internal.h:50
yuv2packedX_fn
void(* yuv2packedX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
Definition: swscale_internal.h:259
AV_CPU_FLAG_SSE2
#define AV_CPU_FLAG_SSE2
PIV SSE2 functions.
Definition: cpu.h:37
ff_sws_graph_free
void ff_sws_graph_free(SwsGraph **pgraph)
Uninitialize any state associate with this filter graph and free it.
Definition: graph.c:783
ff_sws_slice_worker
void ff_sws_slice_worker(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads)
Definition: swscale.c:1564
isBE
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:767
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
RY_IDX
#define RY_IDX
Definition: swscale_internal.h:462
ff_sws_init_swscale_loongarch
av_cold void ff_sws_init_swscale_loongarch(SwsInternal *c)
Definition: swscale_init_loongarch.c:62
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
fillPlane16
static void fillPlane16(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian)
Definition: swscale_internal.h:1065
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
usePal
static av_always_inline int usePal(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:937
cpu.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
isAnyRGB
static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:875
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
SwsContext::src_h
int src_h
Width and height of the source frame.
Definition: swscale.h:237
SwsFormat
Definition: format.h:77
xyz12Torgb48_c
static void xyz12Torgb48_c(const SwsInternal *c, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int w, int h)
Definition: swscale.c:746
align
static const uint8_t *BS_FUNC() align(BSCTX *bc)
Skip bits to a byte boundary.
Definition: bitstream_template.h:419
is32BPS
static av_always_inline int is32BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:753
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:408
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
img
#define img
Definition: vf_colormatrix.c:114
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
lumRangeFromJpeg_c
static void lumRangeFromJpeg_c(int16_t *dst, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:201
SWS_DITHER_NB
@ SWS_DITHER_NB
Definition: swscale.h:86
ff_sws_init_swscale_ppc
av_cold void ff_sws_init_swscale_ppc(SwsInternal *c)
Definition: swscale_altivec.c:233
SwsContext::dst_format
int dst_format
Destination pixel format.
Definition: swscale.h:240
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
fillPlane32
static void fillPlane32(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian, int is_float)
Definition: swscale_internal.h:1079
Y
#define Y
Definition: boxblur.h:37
yuv2anyX_fn
void(* yuv2anyX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
Definition: swscale_internal.h:293
BY_IDX
#define BY_IDX
Definition: swscale_internal.h:464
ff_sws_init_swscale_x86
av_cold void ff_sws_init_swscale_x86(SwsInternal *c)
Definition: swscale.c:490
scale_cascaded
static int scale_cascaded(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:994
emms.h
SwsInternal::dst_slice_align
unsigned int dst_slice_align
Definition: swscale_internal.h:698
sws
static SwsContext * sws[3]
Definition: swscale.c:73
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
SwsGraph::dst
SwsFormat dst
Definition: graph.h:142
SwsFormat::format
enum AVPixelFormat format
Definition: format.h:80
sws_send_slice
int sws_send_slice(SwsContext *sws, unsigned int slice_start, unsigned int slice_height)
Indicate that a horizontal slice of input data is available in the source frame previously provided t...
Definition: swscale.c:1247
FIELD_TOP
@ FIELD_TOP
Definition: format.h:56
ff_sws_init_scale
void ff_sws_init_scale(SwsInternal *c)
Definition: swscale.c:698
src2
const pixel * src2
Definition: h264pred_template.c:421
common.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
check_image_pointers
static int check_image_pointers(const uint8_t *const data[4], enum AVPixelFormat pix_fmt, const int linesizes[4])
Definition: swscale.c:729
av_always_inline
#define av_always_inline
Definition: attributes.h:63
swscale_internal.h
yuv2interleavedX_fn
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
Definition: swscale_internal.h:157
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_FLAG_BE
#define AV_PIX_FMT_FLAG_BE
Pixel format is big-endian.
Definition: pixdesc.h:116
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
SwsContext::dst_h
int dst_h
Width and height of the destination frame.
Definition: swscale.h:238
ff_updateMMXDitherTables
void ff_updateMMXDitherTables(SwsInternal *c, int dstY)
AV_PIX_FMT_RGB4_BYTE
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:95
SwsSlice
Struct which defines a slice of an image to be scaled or an output for a scaled slice.
Definition: swscale_internal.h:1117
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:650
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: dec.c:844
ff_init_slice_from_src
int ff_init_slice_from_src(SwsSlice *s, uint8_t *const src[4], const int stride[4], int srcW, int lumY, int lumH, int chrY, int chrH, int relative)
Definition: slice.c:148
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:118
frame_ref
static int frame_ref(AVFrame *dst, const AVFrame *src)
Definition: swscale.c:1359
SWS_DITHER_ED
@ SWS_DITHER_ED
Definition: swscale.h:83
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
yuv2packed1_fn
void(* yuv2packed1_fn)(SwsInternal *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
Definition: swscale_internal.h:194
SwsInternal
Definition: swscale_internal.h:330
ret
ret
Definition: filter_design.txt:187
sws_receive_slice_alignment
unsigned int sws_receive_slice_alignment(const SwsContext *sws)
Get the alignment required for slices.
Definition: swscale.c:1260
__asm__
__asm__(".macro parse_r var r\n\t" "\\var = -1\n\t" _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3) _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7) _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11) _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15) _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19) _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31) ".iflt \\var\n\t" ".error \"Unable to parse register name \\r\"\n\t" ".endif\n\t" ".endm")
AVHWDeviceContext::type
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:75
bswap.h
sws_frame_start
int sws_frame_start(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Initialize the scaling process for a given pair of source/destination frames.
Definition: swscale.c:1216
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
sws_pb_64
static const uint8_t sws_pb_64[8]
Definition: swscale.c:54
U
#define U(x)
Definition: vpx_arith.h:37
yuv2planarX_fn
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
Definition: swscale_internal.h:137
reset_ptr
static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
Definition: swscale.c:717
ff_init_vscale_pfn
void ff_init_vscale_pfn(SwsInternal *c, yuv2planar1_fn yuv2plane1, yuv2planarX_fn yuv2planeX, yuv2interleavedX_fn yuv2nv12cX, yuv2packed1_fn yuv2packed1, yuv2packed2_fn yuv2packed2, yuv2packedX_fn yuv2packedX, yuv2anyX_fn yuv2anyX, int use_mmx)
setup vertical scaler functions
Definition: vscale.c:258
sws_scale
int attribute_align_arg sws_scale(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1548
SWS_PRINT_INFO
@ SWS_PRINT_INFO
Emit verbose log of scaling parameters.
Definition: swscale.h:121
SwsFormat::color
SwsColor color
Definition: format.h:86
lumRangeFromJpeg16_c
static void lumRangeFromJpeg16_c(int16_t *_dst, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:248
atomic_exchange_explicit
#define atomic_exchange_explicit(object, desired, order)
Definition: stdatomic.h:106
SWS_STRICT
@ SWS_STRICT
Return an error on underspecified conversions.
Definition: swscale.h:116
ff_dither_8x8_128
const uint8_t ff_dither_8x8_128[9][8]
Definition: swscale.c:42
AV_CPU_FLAG_MMXEXT
#define AV_CPU_FLAG_MMXEXT
SSE integer functions or AMD MMX ext.
Definition: cpu.h:33
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
ff_swscale
int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:263
SwsFormat::csp
enum AVColorSpace csp
Definition: format.h:83
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
av_clip_uint16
#define av_clip_uint16
Definition: common.h:112
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
desc
const char * desc
Definition: libsvtav1.c:82
scale_gamma
static int scale_gamma(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:960
mem.h
BU_IDX
#define BU_IDX
Definition: swscale_internal.h:467
SwsPlane::sliceY
int sliceY
index of first line
Definition: swscale_internal.h:1106
SwsContext::dst_w
int dst_w
Definition: swscale.h:238
SwsInternal::dst_slice_height
int dst_slice_height
Definition: swscale_internal.h:347
SwsGraph
Filter graph, which represents a 'baked' pixel format conversion.
Definition: graph.h:120
SwsContext::src_format
int src_format
Source pixel format.
Definition: swscale.h:239
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
ff_hyscale_fast_c
void ff_hyscale_fast_c(SwsInternal *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:23
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
ff_sws_init_output_funcs
av_cold void ff_sws_init_output_funcs(SwsInternal *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
Definition: output.c:3289
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
ff_sws_init_swscale_aarch64
av_cold void ff_sws_init_swscale_aarch64(SwsInternal *c)
Definition: swscale.c:350
SwsInternal::dst_slice_start
int dst_slice_start
Definition: swscale_internal.h:346
int32_t
int32_t
Definition: audioconvert.c:56
hwcontext.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sws_internal
static SwsInternal * sws_internal(const SwsContext *sws)
Definition: swscale_internal.h:74
sws_scale_frame
int sws_scale_frame(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Scale source data from src and write the output to dst.
Definition: swscale.c:1375
h
h
Definition: vp9dsp_template.c:2070
stride
#define stride
Definition: h264pred_template.c:536
solve_range_convert
static void solve_range_convert(uint16_t src_min, uint16_t src_max, uint16_t dst_min, uint16_t dst_max, int src_bits, int src_shift, int mult_shift, uint32_t *coeff, int64_t *offset)
Definition: swscale.c:578
isPlanar
static av_always_inline int isPlanar(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:915
GU_IDX
#define GU_IDX
Definition: swscale_internal.h:466
width
#define width
Definition: dsp.h:89
SwsContext
Main external API structure.
Definition: swscale.h:191
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:120
ff_sws_graph_run
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image dimensions or settings change in any way splits interlaced images into separate and calls ff_sws_graph_run() on each. From the point of view of SwsGraph itself
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3823
get_frame_img
static SwsImg get_frame_img(const AVFrame *frame, int field)
Definition: swscale.c:1324
src
#define src
Definition: vp8dsp.c:248
swscale.h
SwsColor::prim
enum AVColorPrimaries prim
Definition: format.h:61
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
AV_RB16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_RB16
Definition: bytestream.h:98
isALPHA
static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:897
RGB2YUV_SHIFT
#define RGB2YUV_SHIFT
Definition: swscale_internal.h:471