FFmpeg
swscale.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <stdio.h>
23 #include <string.h>
24 
25 #include "libavutil/avassert.h"
26 #include "libavutil/bswap.h"
27 #include "libavutil/common.h"
28 #include "libavutil/cpu.h"
29 #include "libavutil/emms.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/mem_internal.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/hwcontext.h"
35 #include "config.h"
36 #include "swscale_internal.h"
37 #include "swscale.h"
38 #if CONFIG_VULKAN
39 #include "vulkan/ops.h"
40 #endif
41 
/* 8x8 ordered-dither matrix used for bit-depth reduction (values 0..126).
 * NOTE(review): the table has 9 rows and the 9th repeats the 1st —
 * presumably so that consumers indexing one row past (dstY & 7) can read
 * safely without wrapping; confirm against the users of this table. */
DECLARE_ALIGNED(8, const uint8_t, ff_dither_8x8_128)[9][8] = {
    {  36,  68,  60,  92,  34,  66,  58,  90, },
    { 100,   4, 124,  28,  98,   2, 122,  26, },
    {  52,  84,  44,  76,  50,  82,  42,  74, },
    { 116,  20, 108,  12, 114,  18, 106,  10, },
    {  32,  64,  56,  88,  38,  70,  62,  94, },
    {  96,   0, 120,  24, 102,   6, 126,  30, },
    {  48,  80,  40,  72,  54,  86,  46,  78, },
    { 112,  16, 104,   8, 118,  22, 110,  14, },
    {  36,  68,  60,  92,  34,  66,  58,  90, },
};
53 
/* Flat row of 64s: neutral "dither" used for both luma and chroma when no
 * real dithering is wanted (see the !should_dither assignment in ff_swscale). */
DECLARE_ALIGNED(8, static const uint8_t, sws_pb_64)[8] = {
    64, 64, 64, 64, 64, 64, 64, 64
};
57 
58 static av_always_inline void fillPlane(uint8_t *plane, int stride, int width,
59  int height, int y, uint8_t val)
60 {
61  int i;
62  uint8_t *ptr = plane + stride * y;
63  for (i = 0; i < height; i++) {
64  memset(ptr, val, width);
65  ptr += stride;
66  }
67 }
68 
/**
 * Horizontal scaling of high bit-depth input into 19-bit intermediate
 * samples stored as int32_t.
 *
 * For each output pixel a filterSize-tap FIR filter (14-bit coefficients)
 * is applied starting at filterPos[i]; the accumulated value is shifted
 * down by `sh` and clipped to the 19-bit maximum.
 *
 * The destination is declared int16_t* for interface uniformity with the
 * other hScale functions but actually holds int32_t values.
 */
static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
    int i;
    int32_t *dst = (int32_t *) _dst;
    const uint16_t *src = (const uint16_t *) _src;
    int bits = desc->comp[0].depth - 1;
    int sh = bits - 4;   // default: scale a (depth-1)-bit value down to 19 bits

    /* NOTE(review): RGB/PAL8 sources below 16 bits use a fixed shift of 9 —
     * presumably because their input conversion already normalizes the
     * samples; confirm against the input path. */
    if ((isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8) && desc->comp[0].depth<16) {
        sh = 9;
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like uint 16bpc */
        sh = 16 - 1 - 4;
    }

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        // filter=14 bit, input=16 bit, output=30 bit, >> sh gives a 19 bit result
        dst[i] = FFMIN(val >> sh, (1 << 19) - 1);
    }
}
98 
/**
 * Horizontal scaling of high bit-depth input into 15-bit intermediate
 * samples (int16_t). Counterpart of hScale16To19_c for pipelines whose
 * destination depth fits in 15 bits.
 */
static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW,
                           const uint8_t *_src, const int16_t *filter,
                           const int32_t *filterPos, int filterSize)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);
    int i;
    const uint16_t *src = (const uint16_t *) _src;
    int sh = desc->comp[0].depth - 1;

    if (sh<15) {
        /* sub-16-bit input: RGB/PAL8 uses a fixed shift, everything else
         * scales according to its own depth */
        sh = isAnyRGB(c->opts.src_format) || c->opts.src_format==AV_PIX_FMT_PAL8 ? 13 : (desc->comp[0].depth - 1);
    } else if (desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* float input is processed like uint 16bpc */
        sh = 16 - 1;
    }

    for (i = 0; i < dstW; i++) {
        int j;
        int srcPos = filterPos[i];
        int val = 0;

        for (j = 0; j < filterSize; j++) {
            val += src[srcPos + j] * filter[filterSize * i + j];
        }
        // filter=14 bit, input=16 bit, output=30 bit, >> sh gives a 15 bit result
        dst[i] = FFMIN(val >> sh, (1 << 15) - 1);
    }
}
126 
127 // bilinear / bicubic scaling
128 static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW,
129  const uint8_t *src, const int16_t *filter,
130  const int32_t *filterPos, int filterSize)
131 {
132  int i;
133  for (i = 0; i < dstW; i++) {
134  int j;
135  int srcPos = filterPos[i];
136  int val = 0;
137  for (j = 0; j < filterSize; j++) {
138  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
139  }
140  dst[i] = FFMIN(val >> 7, (1 << 15) - 1); // the cubic equation does overflow ...
141  }
142 }
143 
144 static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW,
145  const uint8_t *src, const int16_t *filter,
146  const int32_t *filterPos, int filterSize)
147 {
148  int i;
149  int32_t *dst = (int32_t *) _dst;
150  for (i = 0; i < dstW; i++) {
151  int j;
152  int srcPos = filterPos[i];
153  int val = 0;
154  for (j = 0; j < filterSize; j++) {
155  val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
156  }
157  dst[i] = FFMIN(val >> 3, (1 << 19) - 1); // the cubic equation does overflow ...
158  }
159 }
160 
161 // FIXME all pal and rgb srcFormats could do this conversion as well
162 // FIXME all scalers more complex than bilinear could do half of this transform
/**
 * Expand chroma to full (JPEG) range in place. Values are 15-bit
 * intermediates; `_coeff` is a 14-bit fixed-point factor (truncated to
 * 16 bits) and `_offset` a pre-scaled bias (truncated to 32 bits).
 * Results are clipped to the 15-bit maximum.
 */
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width,
                             uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;

    for (int i = 0; i < width; i++) {
        const int u = (dstU[i] * coeff + offset) >> 14;
        const int v = (dstV[i] * coeff + offset) >> 14;

        dstU[i] = u > (1 << 15) - 1 ? (1 << 15) - 1 : u;
        dstV[i] = v > (1 << 15) - 1 ? (1 << 15) - 1 : v;
    }
}
176 
/**
 * Compress full-range (JPEG) chroma to limited range in place.
 * The scaled result is stored without clipping.
 */
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width,
                               uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;
    int16_t *const endU = dstU + width;

    while (dstU < endU) {
        *dstU = (*dstU * coeff + offset) >> 14;
        *dstV = (*dstV * coeff + offset) >> 14;
        dstU++;
        dstV++;
    }
}
188 
/**
 * Expand luma to full (JPEG) range in place; the result is clipped
 * to the 15-bit maximum.
 */
static void lumRangeToJpeg_c(int16_t *dst, int width,
                             uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;

    while (width-- > 0) {
        const int y = (*dst * coeff + offset) >> 14;

        *dst++ = y > (1 << 15) - 1 ? (1 << 15) - 1 : y;
    }
}
200 
/**
 * Compress full-range (JPEG) luma to limited range in place;
 * stored without clipping.
 */
static void lumRangeFromJpeg_c(int16_t *dst, int width,
                               uint32_t _coeff, int64_t _offset)
{
    const uint16_t coeff = _coeff;
    const int32_t offset = _offset;
    int i = 0;

    while (i < width) {
        dst[i] = (dst[i] * coeff + offset) >> 14;
        i++;
    }
}
210 
/**
 * High bit-depth variant of chrRangeToJpeg_c: the buffers hold 19-bit
 * intermediates as int32_t, the coefficient shift is 18 bits, and a
 * 64-bit multiply avoids overflow. Results are clipped to 19 bits.
 */
static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
                               uint32_t coeff, int64_t offset)
{
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;
    const int max19 = (1 << 19) - 1;

    for (int i = 0; i < width; i++) {
        const int u = ((int64_t) dstU[i] * coeff + offset) >> 18;
        const int v = ((int64_t) dstV[i] * coeff + offset) >> 18;

        dstU[i] = u > max19 ? max19 : u;
        dstV[i] = v > max19 ? max19 : v;
    }
}
224 
/**
 * High bit-depth variant of chrRangeFromJpeg_c (19-bit intermediates as
 * int32_t, 18-bit coefficient shift); stored without clipping.
 */
static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width,
                                 uint32_t coeff, int64_t offset)
{
    int32_t *dstU = (int32_t *) _dstU;
    int32_t *dstV = (int32_t *) _dstV;

    while (width-- > 0) {
        *dstU = ((int64_t) *dstU * coeff + offset) >> 18;
        *dstV = ((int64_t) *dstV * coeff + offset) >> 18;
        dstU++;
        dstV++;
    }
}
236 
/**
 * High bit-depth variant of lumRangeToJpeg_c (19-bit intermediates as
 * int32_t, 18-bit coefficient shift); results clipped to 19 bits.
 */
static void lumRangeToJpeg16_c(int16_t *_dst, int width,
                               uint32_t coeff, int64_t offset)
{
    int32_t *dst = (int32_t *) _dst;
    const int max19 = (1 << 19) - 1;

    for (int i = 0; i < width; i++) {
        const int y = ((int64_t) dst[i] * coeff + offset) >> 18;

        dst[i] = y > max19 ? max19 : y;
    }
}
247 
/**
 * High bit-depth variant of lumRangeFromJpeg_c (19-bit intermediates as
 * int32_t, 18-bit coefficient shift); stored without clipping.
 */
static void lumRangeFromJpeg16_c(int16_t *_dst, int width,
                                 uint32_t coeff, int64_t offset)
{
    int32_t *dst = (int32_t *) _dst;

    while (width > 0) {
        *dst = ((int64_t) *dst * coeff + offset) >> 18;
        dst++;
        width--;
    }
}
256 
257 
258 #define DEBUG_SWSCALE_BUFFERS 0
259 #define DEBUG_BUFFERS(...) \
260  if (DEBUG_SWSCALE_BUFFERS) \
261  av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
262 
263 int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[],
264  int srcSliceY, int srcSliceH, uint8_t *const dst[],
265  const int dstStride[], int dstSliceY, int dstSliceH)
266 {
267  const int scale_dst = dstSliceY > 0 || dstSliceH < c->opts.dst_h;
268 
269  /* load a few things into local vars to make the code more readable?
270  * and faster */
271  const int dstW = c->opts.dst_w;
272  int dstH = c->opts.dst_h;
273 
274  const enum AVPixelFormat dstFormat = c->opts.dst_format;
275  const int flags = c->opts.flags;
276  int32_t *vLumFilterPos = c->vLumFilterPos;
277  int32_t *vChrFilterPos = c->vChrFilterPos;
278 
279  const int vLumFilterSize = c->vLumFilterSize;
280  const int vChrFilterSize = c->vChrFilterSize;
281 
282  yuv2planar1_fn yuv2plane1 = c->yuv2plane1;
283  yuv2planarX_fn yuv2planeX = c->yuv2planeX;
284  yuv2interleavedX_fn yuv2nv12cX = c->yuv2nv12cX;
285  yuv2packed1_fn yuv2packed1 = c->yuv2packed1;
286  yuv2packed2_fn yuv2packed2 = c->yuv2packed2;
287  yuv2packedX_fn yuv2packedX = c->yuv2packedX;
288  yuv2anyX_fn yuv2anyX = c->yuv2anyX;
289  const int chrSrcSliceY = srcSliceY >> c->chrSrcVSubSample;
290  const int chrSrcSliceH = AV_CEIL_RSHIFT(srcSliceH, c->chrSrcVSubSample);
291  int should_dither = isNBPS(c->opts.src_format) ||
292  is16BPS(c->opts.src_format);
293  int lastDstY;
294 
295  /* vars which will change and which we need to store back in the context */
296  int dstY = c->dstY;
297  int lastInLumBuf = c->lastInLumBuf;
298  int lastInChrBuf = c->lastInChrBuf;
299 
300  int lumStart = 0;
301  int lumEnd = c->descIndex[0];
302  int chrStart = lumEnd;
303  int chrEnd = c->descIndex[1];
304  int vStart = chrEnd;
305  int vEnd = c->numDesc;
306  SwsSlice *src_slice = &c->slice[lumStart];
307  SwsSlice *hout_slice = &c->slice[c->numSlice-2];
308  SwsSlice *vout_slice = &c->slice[c->numSlice-1];
309  SwsFilterDescriptor *desc = c->desc;
310 
311  int needAlpha = c->needAlpha;
312 
313  int hasLumHoles = 1;
314  int hasChrHoles = 1;
315 
316  const uint8_t *src2[4];
317  int srcStride2[4];
318 
319  if (isPacked(c->opts.src_format)) {
320  src2[0] =
321  src2[1] =
322  src2[2] =
323  src2[3] = src[0];
324  srcStride2[0] =
325  srcStride2[1] =
326  srcStride2[2] =
327  srcStride2[3] = srcStride[0];
328  } else {
329  memcpy(src2, src, sizeof(src2));
330  memcpy(srcStride2, srcStride, sizeof(srcStride2));
331  }
332 
333  srcStride2[1] *= 1 << c->vChrDrop;
334  srcStride2[2] *= 1 << c->vChrDrop;
335 
336  DEBUG_BUFFERS("swscale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
337  src2[0], srcStride2[0], src2[1], srcStride2[1],
338  src2[2], srcStride2[2], src2[3], srcStride2[3],
339  dst[0], dstStride[0], dst[1], dstStride[1],
340  dst[2], dstStride[2], dst[3], dstStride[3]);
341  DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
342  srcSliceY, srcSliceH, dstY, dstH);
343  DEBUG_BUFFERS("vLumFilterSize: %d vChrFilterSize: %d\n",
344  vLumFilterSize, vChrFilterSize);
345 
346  if (dstStride[0]&15 || dstStride[1]&15 ||
347  dstStride[2]&15 || dstStride[3]&15) {
348  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
349  if (flags & SWS_PRINT_INFO &&
350  !atomic_exchange_explicit(&ctx->stride_unaligned_warned, 1, memory_order_relaxed)) {
352  "Warning: dstStride is not aligned!\n"
353  " ->cannot do aligned memory accesses anymore\n");
354  }
355  }
356 
357 #if ARCH_X86
358  if ( (uintptr_t) dst[0]&15 || (uintptr_t) dst[1]&15 || (uintptr_t) dst[2]&15
359  || (uintptr_t)src2[0]&15 || (uintptr_t)src2[1]&15 || (uintptr_t)src2[2]&15
360  || srcStride2[0]&15 || srcStride2[1]&15 || srcStride2[2]&15 || srcStride2[3]&15
361  ) {
362  SwsInternal *const ctx = c->parent ? sws_internal(c->parent) : c;
363  int cpu_flags = av_get_cpu_flags();
364  if (flags & SWS_PRINT_INFO && HAVE_MMXEXT && (cpu_flags & AV_CPU_FLAG_SSE2) &&
365  !atomic_exchange_explicit(&ctx->stride_unaligned_warned,1, memory_order_relaxed)) {
366  av_log(c, AV_LOG_WARNING, "Warning: data is not aligned! This can lead to a speed loss\n");
367  }
368  }
369 #endif
370 
371  if (scale_dst) {
372  dstY = dstSliceY;
373  dstH = dstY + dstSliceH;
374  lastInLumBuf = -1;
375  lastInChrBuf = -1;
376  } else if (srcSliceY == 0) {
377  /* Note the user might start scaling the picture in the middle so this
378  * will not get executed. This is not really intended but works
379  * currently, so people might do it. */
380  dstY = 0;
381  lastInLumBuf = -1;
382  lastInChrBuf = -1;
383  }
384 
385  if (!should_dither) {
386  c->chrDither8 = c->lumDither8 = sws_pb_64;
387  }
388  lastDstY = dstY;
389 
390  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
391  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, c->use_mmx_vfilter);
392 
393  ff_init_slice_from_src(src_slice, (uint8_t**)src2, srcStride2, c->opts.src_w,
394  srcSliceY, srcSliceH, chrSrcSliceY, chrSrcSliceH, 1);
395 
396  ff_init_slice_from_src(vout_slice, (uint8_t**)dst, dstStride, c->opts.dst_w,
397  dstY, dstSliceH, dstY >> c->chrDstVSubSample,
398  AV_CEIL_RSHIFT(dstSliceH, c->chrDstVSubSample), scale_dst);
399  if (srcSliceY == 0) {
400  hout_slice->plane[0].sliceY = lastInLumBuf + 1;
401  hout_slice->plane[1].sliceY = lastInChrBuf + 1;
402  hout_slice->plane[2].sliceY = lastInChrBuf + 1;
403  hout_slice->plane[3].sliceY = lastInLumBuf + 1;
404 
405  hout_slice->plane[0].sliceH =
406  hout_slice->plane[1].sliceH =
407  hout_slice->plane[2].sliceH =
408  hout_slice->plane[3].sliceH = 0;
409  hout_slice->width = dstW;
410  }
411 
412  for (; dstY < dstH; dstY++) {
413  const int chrDstY = dstY >> c->chrDstVSubSample;
414  int use_mmx_vfilter= c->use_mmx_vfilter;
415 
416  // First line needed as input
417  const int firstLumSrcY = FFMAX(1 - vLumFilterSize, vLumFilterPos[dstY]);
418  const int firstLumSrcY2 = FFMAX(1 - vLumFilterSize, vLumFilterPos[FFMIN(dstY | ((1 << c->chrDstVSubSample) - 1), c->opts.dst_h - 1)]);
419  // First line needed as input
420  const int firstChrSrcY = FFMAX(1 - vChrFilterSize, vChrFilterPos[chrDstY]);
421 
422  // Last line needed as input
423  int lastLumSrcY = FFMIN(c->opts.src_h, firstLumSrcY + vLumFilterSize) - 1;
424  int lastLumSrcY2 = FFMIN(c->opts.src_h, firstLumSrcY2 + vLumFilterSize) - 1;
425  int lastChrSrcY = FFMIN(c->chrSrcH, firstChrSrcY + vChrFilterSize) - 1;
426  int enough_lines;
427 
428  int i;
429  int posY, cPosY, firstPosY, lastPosY, firstCPosY, lastCPosY;
430 
431  // handle holes (FAST_BILINEAR & weird filters)
432  if (firstLumSrcY > lastInLumBuf) {
433 
434  hasLumHoles = lastInLumBuf != firstLumSrcY - 1;
435  if (hasLumHoles) {
436  hout_slice->plane[0].sliceY = firstLumSrcY;
437  hout_slice->plane[3].sliceY = firstLumSrcY;
438  hout_slice->plane[0].sliceH =
439  hout_slice->plane[3].sliceH = 0;
440  }
441 
442  lastInLumBuf = firstLumSrcY - 1;
443  }
444  if (firstChrSrcY > lastInChrBuf) {
445 
446  hasChrHoles = lastInChrBuf != firstChrSrcY - 1;
447  if (hasChrHoles) {
448  hout_slice->plane[1].sliceY = firstChrSrcY;
449  hout_slice->plane[2].sliceY = firstChrSrcY;
450  hout_slice->plane[1].sliceH =
451  hout_slice->plane[2].sliceH = 0;
452  }
453 
454  lastInChrBuf = firstChrSrcY - 1;
455  }
456 
457  DEBUG_BUFFERS("dstY: %d\n", dstY);
458  DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
459  firstLumSrcY, lastLumSrcY, lastInLumBuf);
460  DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
461  firstChrSrcY, lastChrSrcY, lastInChrBuf);
462 
463  // Do we have enough lines in this slice to output the dstY line
464  enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH &&
465  lastChrSrcY < AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample);
466 
467  if (!enough_lines) {
468  lastLumSrcY = srcSliceY + srcSliceH - 1;
469  lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
470  DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
471  lastLumSrcY, lastChrSrcY);
472  }
473 
474  av_assert0((lastLumSrcY - firstLumSrcY + 1) <= hout_slice->plane[0].available_lines);
475  av_assert0((lastChrSrcY - firstChrSrcY + 1) <= hout_slice->plane[1].available_lines);
476 
477 
478  posY = hout_slice->plane[0].sliceY + hout_slice->plane[0].sliceH;
479  if (posY <= lastLumSrcY && !hasLumHoles) {
480  firstPosY = FFMAX(firstLumSrcY, posY);
481  lastPosY = FFMIN(firstLumSrcY + hout_slice->plane[0].available_lines - 1, srcSliceY + srcSliceH - 1);
482  } else {
483  firstPosY = posY;
484  lastPosY = lastLumSrcY;
485  }
486 
487  cPosY = hout_slice->plane[1].sliceY + hout_slice->plane[1].sliceH;
488  if (cPosY <= lastChrSrcY && !hasChrHoles) {
489  firstCPosY = FFMAX(firstChrSrcY, cPosY);
490  lastCPosY = FFMIN(firstChrSrcY + hout_slice->plane[1].available_lines - 1, AV_CEIL_RSHIFT(srcSliceY + srcSliceH, c->chrSrcVSubSample) - 1);
491  } else {
492  firstCPosY = cPosY;
493  lastCPosY = lastChrSrcY;
494  }
495 
496  ff_rotate_slice(hout_slice, lastPosY, lastCPosY);
497 
498  if (posY < lastLumSrcY + 1) {
499  for (i = lumStart; i < lumEnd; ++i)
500  desc[i].process(c, &desc[i], firstPosY, lastPosY - firstPosY + 1);
501  }
502 
503  lastInLumBuf = lastLumSrcY;
504 
505  if (cPosY < lastChrSrcY + 1) {
506  for (i = chrStart; i < chrEnd; ++i)
507  desc[i].process(c, &desc[i], firstCPosY, lastCPosY - firstCPosY + 1);
508  }
509 
510  lastInChrBuf = lastChrSrcY;
511 
512  if (!enough_lines)
513  break; // we can't output a dstY line so let's try with the next slice
514 
515 #if ARCH_X86 && HAVE_MMX
517  c->dstW_mmx = c->opts.dst_w;
518 #endif
519  if (should_dither) {
520  c->chrDither8 = ff_dither_8x8_128[chrDstY & 7];
521  c->lumDither8 = ff_dither_8x8_128[dstY & 7];
522  }
523  if (dstY >= c->opts.dst_h - 2) {
524  /* hmm looks like we can't use MMX here without overwriting
525  * this array's tail */
526  ff_sws_init_output_funcs(c, &yuv2plane1, &yuv2planeX, &yuv2nv12cX,
527  &yuv2packed1, &yuv2packed2, &yuv2packedX, &yuv2anyX);
528  use_mmx_vfilter= 0;
529  ff_init_vscale_pfn(c, yuv2plane1, yuv2planeX, yuv2nv12cX,
530  yuv2packed1, yuv2packed2, yuv2packedX, yuv2anyX, use_mmx_vfilter);
531  }
532 
533  for (i = vStart; i < vEnd; ++i)
534  desc[i].process(c, &desc[i], dstY, 1);
535  }
536  if (isPlanar(dstFormat) && isALPHA(dstFormat) && !needAlpha) {
537  int offset = lastDstY - dstSliceY;
538  int length = dstW;
539  int height = dstY - lastDstY;
540 
541  if (is16BPS(dstFormat) || isNBPS(dstFormat)) {
542  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
543  fillPlane16(dst[3], dstStride[3], length, height, offset,
544  1, desc->comp[3].depth,
545  isBE(dstFormat));
546  } else if (is32BPS(dstFormat)) {
547  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(dstFormat);
548  fillPlane32(dst[3], dstStride[3], length, height, offset,
549  1, desc->comp[3].depth,
550  isBE(dstFormat), desc->flags & AV_PIX_FMT_FLAG_FLOAT);
551  } else
552  fillPlane(dst[3], dstStride[3], length, height, offset, 255);
553  }
554 
555 #if HAVE_MMXEXT_INLINE
557  __asm__ volatile ("sfence" ::: "memory");
558 #endif
559  emms_c();
560 
561  /* store changed local vars back in the context */
562  c->dstY = dstY;
563  c->lastInLumBuf = lastInLumBuf;
564  c->lastInChrBuf = lastInChrBuf;
565 
566  return dstY - lastDstY;
567 }
568 
569 /*
570  * Solve for coeff and offset:
571  * dst = ((src << src_shift) * coeff + offset) >> (mult_shift + src_shift)
572  *
573  * If SwsInternal->dstBpc is > 14, coeff is uint16_t and offset is int32_t,
574  * otherwise (SwsInternal->dstBpc is <= 14) coeff is uint32_t and offset is
575  * int64_t.
576  */
/**
 * Derive the fixed-point multiplier and offset for the range-conversion
 * functions above, so that src_max maps exactly onto dst_max under
 * dst = ((src << src_shift) * coeff + offset) >> (mult_shift + src_shift).
 * (src_bits is part of the solver interface but not needed here.)
 */
static void solve_range_convert(uint16_t src_min, uint16_t src_max,
                                uint16_t dst_min, uint16_t dst_max,
                                int src_bits, int src_shift, int mult_shift,
                                uint32_t *coeff, int64_t *offset)
{
    const uint16_t src_range = src_max - src_min;
    const uint16_t dst_range = dst_max - dst_min;
    const int total_shift = mult_shift + src_shift;
    const uint64_t quotient = ((uint64_t) dst_range << total_shift) / src_range;

    /* round the quotient up when dropping the src_shift fractional bits */
    *coeff = (quotient + (((uint64_t) 1 << src_shift) - 1)) >> src_shift;
    /* choose the offset so src_max evaluates to dst_max, with a half step
     * added for round-to-nearest at evaluation time */
    *offset = ((int64_t) dst_max << total_shift) -
              ((int64_t) src_max << src_shift) * *coeff +
              ((int64_t) 1 << (mult_shift - 1));
}
590 
592 {
593  const int bit_depth = c->dstBpc ? FFMIN(c->dstBpc, 16) : 8;
594  const int src_bits = bit_depth <= 14 ? 15 : 19;
595  const int src_shift = src_bits - bit_depth;
596  const int mult_shift = bit_depth <= 14 ? 14 : 18;
597  const uint16_t mpeg_min = 16U << (bit_depth - 8);
598  const uint16_t mpeg_max_lum = 235U << (bit_depth - 8);
599  const uint16_t mpeg_max_chr = 240U << (bit_depth - 8);
600  const uint16_t jpeg_max = (1U << bit_depth) - 1;
601  uint16_t src_min, src_max_lum, src_max_chr;
602  uint16_t dst_min, dst_max_lum, dst_max_chr;
603  if (c->opts.src_range) {
604  src_min = 0;
605  src_max_lum = jpeg_max;
606  src_max_chr = jpeg_max;
607  dst_min = mpeg_min;
608  dst_max_lum = mpeg_max_lum;
609  dst_max_chr = mpeg_max_chr;
610  } else {
611  src_min = mpeg_min;
612  src_max_lum = mpeg_max_lum;
613  src_max_chr = mpeg_max_chr;
614  dst_min = 0;
615  dst_max_lum = jpeg_max;
616  dst_max_chr = jpeg_max;
617  }
618  solve_range_convert(src_min, src_max_lum, dst_min, dst_max_lum,
619  src_bits, src_shift, mult_shift,
620  &c->lumConvertRange_coeff, &c->lumConvertRange_offset);
621  solve_range_convert(src_min, src_max_chr, dst_min, dst_max_chr,
622  src_bits, src_shift, mult_shift,
623  &c->chrConvertRange_coeff, &c->chrConvertRange_offset);
624 }
625 
627 {
628  c->lumConvertRange = NULL;
629  c->chrConvertRange = NULL;
630  if (c->opts.src_range != c->opts.dst_range && !isAnyRGB(c->opts.dst_format) && c->dstBpc < 32) {
632  if (c->dstBpc <= 14) {
633  if (c->opts.src_range) {
634  c->lumConvertRange = lumRangeFromJpeg_c;
635  c->chrConvertRange = chrRangeFromJpeg_c;
636  } else {
637  c->lumConvertRange = lumRangeToJpeg_c;
638  c->chrConvertRange = chrRangeToJpeg_c;
639  }
640  } else {
641  if (c->opts.src_range) {
642  c->lumConvertRange = lumRangeFromJpeg16_c;
643  c->chrConvertRange = chrRangeFromJpeg16_c;
644  } else {
645  c->lumConvertRange = lumRangeToJpeg16_c;
646  c->chrConvertRange = chrRangeToJpeg16_c;
647  }
648  }
649 
650 #if ARCH_AARCH64
652 #elif ARCH_LOONGARCH64
654 #elif ARCH_RISCV
656 #elif ARCH_X86
658 #endif
659  }
660 }
661 
663 {
664  enum AVPixelFormat srcFormat = c->opts.src_format;
665 
667 
668  ff_sws_init_output_funcs(c, &c->yuv2plane1, &c->yuv2planeX,
669  &c->yuv2nv12cX, &c->yuv2packed1,
670  &c->yuv2packed2, &c->yuv2packedX, &c->yuv2anyX);
671 
672  ff_sws_init_input_funcs(c, &c->lumToYV12, &c->alpToYV12, &c->chrToYV12,
673  &c->readLumPlanar, &c->readAlpPlanar, &c->readChrPlanar);
674 
675  if (c->srcBpc == 8) {
676  if (c->dstBpc <= 14) {
677  c->hyScale = c->hcScale = hScale8To15_c;
678  if (c->opts.flags & SWS_FAST_BILINEAR) {
679  c->hyscale_fast = ff_hyscale_fast_c;
680  c->hcscale_fast = ff_hcscale_fast_c;
681  }
682  } else {
683  c->hyScale = c->hcScale = hScale8To19_c;
684  }
685  } else {
686  c->hyScale = c->hcScale = c->dstBpc > 14 ? hScale16To19_c
687  : hScale16To15_c;
688  }
689 
691 
692  if (!(isGray(srcFormat) || isGray(c->opts.dst_format) ||
693  srcFormat == AV_PIX_FMT_MONOBLACK || srcFormat == AV_PIX_FMT_MONOWHITE))
694  c->needs_hcscale = 1;
695 }
696 
698 {
700 
701 #if ARCH_PPC
703 #elif ARCH_X86
705 #elif ARCH_AARCH64
707 #elif ARCH_ARM
709 #elif ARCH_LOONGARCH64
711 #elif ARCH_RISCV
713 #endif
714 }
715 
716 static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
717 {
718  if (!isALPHA(format))
719  src[3] = NULL;
720  if (!isPlanar(format)) {
721  src[3] = src[2] = NULL;
722 
723  if (!usePal(format))
724  src[1] = NULL;
725  }
726 }
727 
728 static int check_image_pointers(const uint8_t * const data[4], enum AVPixelFormat pix_fmt,
729  const int linesizes[4])
730 {
732  int i;
733 
734  av_assert2(desc);
735 
736  for (i = 0; i < 4; i++) {
737  int plane = desc->comp[i].plane;
738  if (!data[plane] || !linesizes[plane])
739  return 0;
740  }
741 
742  return 1;
743 }
744 
/**
 * Convert packed 12-bit X'Y'Z' (stored MSB-aligned in 16-bit words) to
 * packed 16-bit RGB48.
 *
 * Per pixel: read X'Y'Z', linearize through the 12-bit input gamma LUT,
 * multiply by the 12-bit fixed-point XYZ->RGB matrix, clip to 16 bits,
 * then map through the output gamma LUT and realign to 16 bits.
 *
 * Byte order of both read and write follows the BE flag of the *source*
 * pixel format descriptor.
 */
static void xyz12Torgb48_c(const SwsInternal *c, uint8_t *dst, int dst_stride,
                           const uint8_t *src, int src_stride, int w, int h)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.src_format);

    for (int yp = 0; yp < h; yp++) {
        const uint16_t *src16 = (const uint16_t *) src;
        uint16_t *dst16 = (uint16_t *) dst;

        for (int xp = 0; xp < 3 * w; xp += 3) {
            int x, y, z, r, g, b;

            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                x = AV_RB16(src16 + xp + 0);
                y = AV_RB16(src16 + xp + 1);
                z = AV_RB16(src16 + xp + 2);
            } else {
                x = AV_RL16(src16 + xp + 0);
                y = AV_RL16(src16 + xp + 1);
                z = AV_RL16(src16 + xp + 2);
            }

            // drop the 4 padding LSBs: samples are 12-bit, MSB-aligned
            x = c->xyz2rgb.gamma.in[x >> 4];
            y = c->xyz2rgb.gamma.in[y >> 4];
            z = c->xyz2rgb.gamma.in[z >> 4];

            // convert from XYZlinear to sRGBlinear
            // (note: `+` binds tighter than `>>`, so the whole sum is shifted)
            r = c->xyz2rgb.mat[0][0] * x +
                c->xyz2rgb.mat[0][1] * y +
                c->xyz2rgb.mat[0][2] * z >> 12;
            g = c->xyz2rgb.mat[1][0] * x +
                c->xyz2rgb.mat[1][1] * y +
                c->xyz2rgb.mat[1][2] * z >> 12;
            b = c->xyz2rgb.mat[2][0] * x +
                c->xyz2rgb.mat[2][1] * y +
                c->xyz2rgb.mat[2][2] * z >> 12;

            // limit values to 16-bit depth
            r = av_clip_uint16(r);
            g = av_clip_uint16(g);
            b = av_clip_uint16(b);

            // convert from sRGBlinear to RGB and scale from 12bit to 16bit
            if (desc->flags & AV_PIX_FMT_FLAG_BE) {
                AV_WB16(dst16 + xp + 0, c->xyz2rgb.gamma.out[r] << 4);
                AV_WB16(dst16 + xp + 1, c->xyz2rgb.gamma.out[g] << 4);
                AV_WB16(dst16 + xp + 2, c->xyz2rgb.gamma.out[b] << 4);
            } else {
                AV_WL16(dst16 + xp + 0, c->xyz2rgb.gamma.out[r] << 4);
                AV_WL16(dst16 + xp + 1, c->xyz2rgb.gamma.out[g] << 4);
                AV_WL16(dst16 + xp + 2, c->xyz2rgb.gamma.out[b] << 4);
            }
        }

        src += src_stride;
        dst += dst_stride;
    }
}
803 
804 static void rgb48Toxyz12_c(const SwsInternal *c, uint8_t *dst, int dst_stride,
805  const uint8_t *src, int src_stride, int w, int h)
806 {
807  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(c->opts.dst_format);
808 
809  for (int yp = 0; yp < h; yp++) {
810  uint16_t *src16 = (uint16_t *) src;
811  uint16_t *dst16 = (uint16_t *) dst;
812 
813  for (int xp = 0; xp < 3 * w; xp += 3) {
814  int x, y, z, r, g, b;
815 
816  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
817  r = AV_RB16(src16 + xp + 0);
818  g = AV_RB16(src16 + xp + 1);
819  b = AV_RB16(src16 + xp + 2);
820  } else {
821  r = AV_RL16(src16 + xp + 0);
822  g = AV_RL16(src16 + xp + 1);
823  b = AV_RL16(src16 + xp + 2);
824  }
825 
826  r = c->rgb2xyz.gamma.in[r >> 4];
827  g = c->rgb2xyz.gamma.in[g >> 4];
828  b = c->rgb2xyz.gamma.in[b >> 4];
829 
830  // convert from sRGBlinear to XYZlinear
831  x = c->rgb2xyz.mat[0][0] * r +
832  c->rgb2xyz.mat[0][1] * g +
833  c->rgb2xyz.mat[0][2] * b >> 12;
834  y = c->rgb2xyz.mat[1][0] * r +
835  c->rgb2xyz.mat[1][1] * g +
836  c->rgb2xyz.mat[1][2] * b >> 12;
837  z = c->rgb2xyz.mat[2][0] * r +
838  c->rgb2xyz.mat[2][1] * g +
839  c->rgb2xyz.mat[2][2] * b >> 12;
840 
841  // limit values to 16-bit depth
842  x = av_clip_uint16(x);
843  y = av_clip_uint16(y);
844  z = av_clip_uint16(z);
845 
846  // convert from XYZlinear to X'Y'Z' and scale from 12bit to 16bit
847  if (desc->flags & AV_PIX_FMT_FLAG_BE) {
848  AV_WB16(dst16 + xp + 0, c->rgb2xyz.gamma.out[x] << 4);
849  AV_WB16(dst16 + xp + 1, c->rgb2xyz.gamma.out[y] << 4);
850  AV_WB16(dst16 + xp + 2, c->rgb2xyz.gamma.out[z] << 4);
851  } else {
852  AV_WL16(dst16 + xp + 0, c->rgb2xyz.gamma.out[x] << 4);
853  AV_WL16(dst16 + xp + 1, c->rgb2xyz.gamma.out[y] << 4);
854  AV_WL16(dst16 + xp + 2, c->rgb2xyz.gamma.out[z] << 4);
855  }
856  }
857 
858  src += src_stride;
859  dst += dst_stride;
860  }
861 }
862 
864 {
865  c->xyz12Torgb48 = xyz12Torgb48_c;
866  c->rgb48Toxyz12 = rgb48Toxyz12_c;
867 
868 #if ARCH_AARCH64
870 #endif
871 }
872 
/**
 * Rebuild the 256-entry palette lookup tables for paletted / tiny-RGB
 * source formats.
 *
 * For each possible input byte the (r,g,b,a) color is derived from the
 * source format, then two tables are filled:
 *   - pal_yuv[i]: packed YUVA computed with the current RGB->YUV table
 *   - pal_rgb[i]: the same color repacked in the destination RGB layout
 *
 * @param c   scaler context; reads opts.src_format / opts.dst_format and
 *            input_rgb2yuv_table, writes pal_yuv / pal_rgb
 * @param pal 256-entry ARGB palette (only read for PAL8 input)
 */
void ff_update_palette(SwsInternal *c, const uint32_t *pal)
{
    uint32_t *rgb2yuv = c->input_rgb2yuv_table;

    int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX];
    int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX];
    int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX];

    for (int i = 0; i < 256; i++) {
        int r, g, b, y, u, v, a = 0xff;
        if (c->opts.src_format == AV_PIX_FMT_PAL8) {
            uint32_t p = pal[i];
            a = (p >> 24) & 0xFF;
            r = (p >> 16) & 0xFF;
            g = (p >> 8) & 0xFF;
            b = p & 0xFF;
        } else if (c->opts.src_format == AV_PIX_FMT_RGB8) {
            // 3:3:2 packed RGB, expanded towards 8 bits per component
            r = ( i >> 5 ) * 36;
            g = ((i >> 2) & 7) * 36;
            b = ( i & 3) * 85;
        } else if (c->opts.src_format == AV_PIX_FMT_BGR8) {
            // 2:3:3 packed BGR
            b = ( i >> 6 ) * 85;
            g = ((i >> 3) & 7) * 36;
            r = ( i & 7) * 36;
        } else if (c->opts.src_format == AV_PIX_FMT_RGB4_BYTE) {
            // 1:2:1 packed RGB, one pixel per byte
            r = ( i >> 3 ) * 255;
            g = ((i >> 1) & 3) * 85;
            b = ( i & 1) * 255;
        } else if (c->opts.src_format == AV_PIX_FMT_GRAY8 || c->opts.src_format == AV_PIX_FMT_GRAY8A) {
            r = g = b = i;
        } else {
            av_assert1(c->opts.src_format == AV_PIX_FMT_BGR4_BYTE);
            // 1:2:1 packed BGR, one pixel per byte
            b = ( i >> 3 ) * 255;
            g = ((i >> 1) & 3) * 85;
            r = ( i & 1) * 255;
        }

        /* the 33/257 << (SHIFT-1) terms add the 16 (luma) / 128 (chroma)
         * level offset plus 0.5 for round-to-nearest before the shift */
        y = av_clip_uint8((ry * r + gy * g + by * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
        u = av_clip_uint8((ru * r + gu * g + bu * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
        v = av_clip_uint8((rv * r + gv * g + bv * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);

        c->pal_yuv[i]= y + (u<<8) + (v<<16) + ((unsigned)a<<24);

        /* repack (r,g,b,a) to match the destination pixel layout */
        switch (c->opts.dst_format) {
        case AV_PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
        case AV_PIX_FMT_RGB24:
#endif
            c->pal_rgb[i]= r + (g<<8) + (b<<16) + ((unsigned)a<<24);
            break;
        case AV_PIX_FMT_BGR32_1:
#if HAVE_BIGENDIAN
        case AV_PIX_FMT_BGR24:
#endif
            c->pal_rgb[i]= a + (r<<8) + (g<<16) + ((unsigned)b<<24);
            break;
        case AV_PIX_FMT_RGB32_1:
#if HAVE_BIGENDIAN
        case AV_PIX_FMT_RGB24:
#endif
            c->pal_rgb[i]= a + (b<<8) + (g<<16) + ((unsigned)r<<24);
            break;
        case AV_PIX_FMT_GBRP:
        case AV_PIX_FMT_GBRAP:
#if HAVE_BIGENDIAN
            c->pal_rgb[i]= a + (r<<8) + (b<<16) + ((unsigned)g<<24);
#else
            c->pal_rgb[i]= g + (b<<8) + (r<<16) + ((unsigned)a<<24);
#endif
            break;
        case AV_PIX_FMT_RGB32:
#if !HAVE_BIGENDIAN
        case AV_PIX_FMT_BGR24:
#endif
        default:
            c->pal_rgb[i]= b + (g<<8) + (r<<16) + ((unsigned)a<<24);
        }
    }
}
952 
953 static int scale_internal(SwsContext *sws,
954  const uint8_t * const srcSlice[], const int srcStride[],
955  int srcSliceY, int srcSliceH,
956  uint8_t *const dstSlice[], const int dstStride[],
957  int dstSliceY, int dstSliceH);
958 
/*
 * Cascaded scaling path used when gamma correction is enabled (dispatched
 * from scale_internal() when sws->gamma_flag is set).  Sub-context [0]
 * converts into cascaded_tmp[0]; sub-context [1] scales either into a
 * second temporary (when a third stage exists) or directly into dst;
 * sub-context [2], if present, converts the scaled lines into dst.
 * The [0]/[2] stages presumably apply the degamma/regamma transforms --
 * TODO(review) confirm against upstream swscale.c.
 *
 * NOTE(review): the opening signature line of this definition
 * ("static int scale_gamma(SwsInternal *c,") appears to have been lost
 * in extraction -- confirm against upstream.
 */
 960  const uint8_t * const srcSlice[], const int srcStride[],
 961  int srcSliceY, int srcSliceH,
 962  uint8_t * const dstSlice[], const int dstStride[],
 963  int dstSliceY, int dstSliceH)
 964 {
 /* Stage 1: convert the input slice into the first temporary buffer. */
 965  int ret = scale_internal(c->cascaded_context[0],
 966  srcSlice, srcStride, srcSliceY, srcSliceH,
 967  c->cascaded_tmp[0], c->cascaded_tmpStride[0], 0, c->opts.src_h);
 968 
 969  if (ret < 0)
 970  return ret;
 971 
 /* Stage 2: scale; target is the second temporary when a third stage
  * exists, otherwise the caller's destination. */
 972  if (c->cascaded_context[2])
 973  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
 974  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
 975  c->cascaded_tmp[1], c->cascaded_tmpStride[1], 0, c->opts.dst_h);
 976  else
 977  ret = scale_internal(c->cascaded_context[1], (const uint8_t * const *)c->cascaded_tmp[0],
 978  c->cascaded_tmpStride[0], srcSliceY, srcSliceH,
 979  dstSlice, dstStride, dstSliceY, dstSliceH);
 980 
 981  if (ret < 0)
 982  return ret;
 983 
 /* Stage 3 (optional): convert the lines stage 2 just produced into dst.
  * dstY1 - ret is the first output line written by stage 2. */
 984  if (c->cascaded_context[2]) {
 985  const int dstY1 = sws_internal(c->cascaded_context[1])->dstY;
 986  ret = scale_internal(c->cascaded_context[2], (const uint8_t * const *)c->cascaded_tmp[1],
 987  c->cascaded_tmpStride[1], dstY1 - ret, dstY1,
 988  dstSlice, dstStride, dstSliceY, dstSliceH);
 989  }
 990  return ret;
 991 }
992 
/*
 * Two-stage cascaded scaling (dispatched from scale_internal() when
 * cascaded sub-contexts exist and the full source frame is available):
 * sub-context [0] scales/converts into a temporary buffer, sub-context
 * [1] scales the temporary into the destination slice.
 *
 * NOTE(review): the opening signature line of this definition
 * ("static int scale_cascaded(SwsInternal *c,") appears to have been
 * lost in extraction -- confirm against upstream swscale.c.
 */
 994  const uint8_t * const srcSlice[], const int srcStride[],
 995  int srcSliceY, int srcSliceH,
 996  uint8_t * const dstSlice[], const int dstStride[],
 997  int dstSliceY, int dstSliceH)
 998 {
 /* Stage 1 always produces the full intermediate frame of dstH0 lines. */
 999  const int dstH0 = c->cascaded_context[0]->dst_h;
 1000  int ret = scale_internal(c->cascaded_context[0],
 1001  srcSlice, srcStride, srcSliceY, srcSliceH,
 1002  c->cascaded_tmp[0], c->cascaded_tmpStride[0],
 1003  0, dstH0);
 1004  if (ret < 0)
 1005  return ret;
 /* Stage 2 consumes the whole intermediate and writes the caller's slice. */
 1006  ret = scale_internal(c->cascaded_context[1],
 1007  (const uint8_t * const * )c->cascaded_tmp[0], c->cascaded_tmpStride[0],
 1008  0, dstH0, dstSlice, dstStride, dstSliceY, dstSliceH);
 1009  return ret;
 1010 }
1011 
/*
 * Core slice-scaling entry point behind sws_scale()/sws_receive_slice().
 *
 * Validates slice geometry against the src/dst macro-pixel heights,
 * dispatches to the gamma or cascaded paths when configured, performs
 * palette / opaque-alpha / XYZ preprocessing into scratch buffers,
 * handles bottom-up input by negating strides and offsetting plane
 * pointers, then runs either the unscaled fast path
 * (c->convert_unscaled) or the generic ff_swscale() path, with an
 * optional in-place RGB48->XYZ12 postprocess on the output.
 *
 * Returns the number of output lines written (as produced by the chosen
 * backend), or a negative AVERROR code on invalid parameters or OOM.
 */
1012 static int scale_internal(SwsContext *sws,
1013  const uint8_t * const srcSlice[], const int srcStride[],
1014  int srcSliceY, int srcSliceH,
1015  uint8_t *const dstSlice[], const int dstStride[],
1016  int dstSliceY, int dstSliceH)
1017 {
1018  SwsInternal *c = sws_internal(sws);
 /* scale_dst: caller requested a sub-slice of the destination (new slice
  * API); frame_start: first call for a new frame (no slice direction yet). */
1019  const int scale_dst = dstSliceY > 0 || dstSliceH < sws->dst_h;
1020  const int frame_start = scale_dst || !c->sliceDir;
1021  int i, ret;
1022  const uint8_t *src2[4];
1023  uint8_t *dst2[4];
 /* Bayer formats span 2 lines per macro-pixel; otherwise the chroma
  * subsampling determines the slice granularity. */
1024  int macro_height_src = isBayer(sws->src_format) ? 2 : (1 << c->chrSrcVSubSample);
1025  int macro_height_dst = isBayer(sws->dst_format) ? 2 : (1 << c->chrDstVSubSample);
1026  // copy strides, so they can safely be modified
1027  int srcStride2[4];
1028  int dstStride2[4];
1029  int srcSliceY_internal = srcSliceY;
1030 
1031  if (!srcStride || !dstStride || !dstSlice || !srcSlice) {
1032  av_log(c, AV_LOG_ERROR, "One of the input parameters to sws_scale() is NULL, please check the calling code\n");
1033  return AVERROR(EINVAL);
1034  }
1035 
 /* Source slice must start and (except for the last slice) end on a
  * macro-pixel boundary and lie within the frame. */
1036  if ((srcSliceY & (macro_height_src - 1)) ||
1037  ((srcSliceH & (macro_height_src - 1)) && srcSliceY + srcSliceH != sws->src_h) ||
1038  srcSliceY + srcSliceH > sws->src_h ||
1039  srcSliceY < 0 ||
1040  srcSliceH < 0 ||
1041  (isBayer(sws->src_format) && srcSliceH <= 1)) {
1042  av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", srcSliceY, srcSliceH);
1043  return AVERROR(EINVAL);
1044  }
1045 
 /* Same boundary rules for the requested destination slice. */
1046  if ((dstSliceY & (macro_height_dst - 1)) ||
1047  ((dstSliceH & (macro_height_dst - 1)) && dstSliceY + dstSliceH != sws->dst_h) ||
1048  dstSliceY + dstSliceH > sws->dst_h) {
1049  av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", dstSliceY, dstSliceH);
1050  return AVERROR(EINVAL);
1051  }
1052 
1053  if (!check_image_pointers(srcSlice, sws->src_format, srcStride)) {
1054  av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
1055  return AVERROR(EINVAL);
1056  }
1057  if (!check_image_pointers((const uint8_t* const*)dstSlice, sws->dst_format, dstStride)) {
1058  av_log(c, AV_LOG_ERROR, "bad dst image pointers\n");
1059  return AVERROR(EINVAL);
1060  }
1061 
1062  // do not mess up sliceDir if we have a "trailing" 0-size slice
1063  if (srcSliceH == 0)
1064  return 0;
1065 
 /* Delegate to the cascaded paths when they are configured. */
1066  if (sws->gamma_flag && c->cascaded_context[0])
1067  return scale_gamma(c, srcSlice, srcStride, srcSliceY, srcSliceH,
1068  dstSlice, dstStride, dstSliceY, dstSliceH);
1069 
1070  if (c->cascaded_context[0] && srcSliceY == 0 && srcSliceH == c->cascaded_context[0]->src_h)
1071  return scale_cascaded(c, srcSlice, srcStride, srcSliceY, srcSliceH,
1072  dstSlice, dstStride, dstSliceY, dstSliceH);
1073 
 /* Bit-exact error-diffusion dithering: clear carried error rows at the
  * start of each frame so results are reproducible. */
1074  if (!srcSliceY && (sws->flags & SWS_BITEXACT) && sws->dither == SWS_DITHER_ED && c->dither_error[0])
1075  for (i = 0; i < 4; i++)
1076  memset(c->dither_error[i], 0, sizeof(c->dither_error[0][0]) * (sws->dst_w+2));
1077 
 /* Paletted input: srcSlice[1] holds the palette. */
1078  if (usePal(sws->src_format))
1079  ff_update_palette(c, (const uint32_t *)srcSlice[1]);
1080 
1081  memcpy(src2, srcSlice, sizeof(src2));
1082  memcpy(dst2, dstSlice, sizeof(dst2));
1083  memcpy(srcStride2, srcStride, sizeof(srcStride2));
1084  memcpy(dstStride2, dstStride, sizeof(dstStride2));
1085 
 /* Establish the slice direction on the first slice of a frame:
  * top-down (+1) if it starts at line 0, bottom-up (-1) otherwise. */
1086  if (frame_start && !scale_dst) {
1087  if (srcSliceY != 0 && srcSliceY + srcSliceH != sws->src_h) {
1088  av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
1089  return AVERROR(EINVAL);
1090  }
1091 
1092  c->sliceDir = (srcSliceY == 0) ? 1 : -1;
1093  } else if (scale_dst)
1094  c->sliceDir = 1;
1095 
 /* Input has an unused alpha byte (e.g. RGB0) but output wants real
  * alpha: copy the slice to scratch and force every alpha byte opaque. */
1096  if (c->src0Alpha && !c->dst0Alpha && isALPHA(sws->dst_format)) {
1097  uint8_t *base;
1098  int x,y;
1099 
1100  av_fast_malloc(&c->rgb0_scratch, &c->rgb0_scratch_allocated,
1101  FFABS(srcStride[0]) * srcSliceH + 32);
1102  if (!c->rgb0_scratch)
1103  return AVERROR(ENOMEM);
1104 
 /* With a negative stride, point base at the last row of the scratch
  * so base + stride*y stays inside the allocation. */
1105  base = srcStride[0] < 0 ? c->rgb0_scratch - srcStride[0] * (srcSliceH-1) :
1106  c->rgb0_scratch;
1107  for (y=0; y<srcSliceH; y++){
1108  memcpy(base + srcStride[0]*y, src2[0] + srcStride[0]*y, 4*sws->src_w);
1109  for (x=c->src0Alpha-1; x<4*sws->src_w; x+=4) {
1110  base[ srcStride[0]*y + x] = 0xFF;
1111  }
1112  }
1113  src2[0] = base;
1114  }
1115 
 /* XYZ input (unless it is a same-size XYZ->XYZ copy): convert to RGB48
  * in a scratch buffer before scaling. */
1116  if (c->srcXYZ && !(c->dstXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
1117  uint8_t *base;
1118 
1119  av_fast_malloc(&c->xyz_scratch, &c->xyz_scratch_allocated,
1120  FFABS(srcStride[0]) * srcSliceH + 32);
1121  if (!c->xyz_scratch)
1122  return AVERROR(ENOMEM);
1123 
1124  base = srcStride[0] < 0 ? c->xyz_scratch - srcStride[0] * (srcSliceH-1) :
1125  c->xyz_scratch;
1126 
1127  c->xyz12Torgb48(c, base, srcStride[0], src2[0], srcStride[0], sws->src_w, srcSliceH);
1128  src2[0] = base;
1129  }
1130 
1131  if (c->sliceDir != 1) {
1132  // slices go from bottom to top => we flip the image internally
1133  for (i=0; i<4; i++) {
1134  srcStride2[i] *= -1;
1135  dstStride2[i] *= -1;
1136  }
1137 
 /* Point each plane at its last row; with the negated strides above,
  * iteration then proceeds upward through memory.  For paletted input,
  * plane 1 is the palette and must not be offset. */
1138  src2[0] += (srcSliceH - 1) * srcStride[0];
1139  if (!usePal(sws->src_format))
1140  src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
1141  src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
1142  src2[3] += (srcSliceH - 1) * srcStride[3];
1143  dst2[0] += ( sws->dst_h - 1) * dstStride[0];
1144  dst2[1] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[1];
1145  dst2[2] += ((sws->dst_h >> c->chrDstVSubSample) - 1) * dstStride[2];
1146  dst2[3] += ( sws->dst_h - 1) * dstStride[3];
1147 
1148  srcSliceY_internal = sws->src_h-srcSliceY-srcSliceH;
1149  }
1150  reset_ptr(src2, sws->src_format);
1151  reset_ptr((void*)dst2, sws->dst_format);
1152 
1153  if (c->convert_unscaled) {
1154  int offset = srcSliceY_internal;
1155  int slice_h = srcSliceH;
1156 
1157  // for dst slice scaling, offset the pointers to match the unscaled API
1158  if (scale_dst) {
1159  av_assert0(offset == 0);
1160  for (i = 0; i < 4 && src2[i]; i++) {
1161  if (!src2[i] || (i > 0 && usePal(sws->src_format)))
1162  break;
1163  src2[i] += (dstSliceY >> ((i == 1 || i == 2) ? c->chrSrcVSubSample : 0)) * srcStride2[i];
1164  }
1165 
1166  for (i = 0; i < 4 && dst2[i]; i++) {
1167  if (!dst2[i] || (i > 0 && usePal(sws->dst_format)))
1168  break;
1169  dst2[i] -= (dstSliceY >> ((i == 1 || i == 2) ? c->chrDstVSubSample : 0)) * dstStride2[i];
1170  }
1171  offset = dstSliceY;
1172  slice_h = dstSliceH;
1173  }
1174 
1175  ret = c->convert_unscaled(c, src2, srcStride2, offset, slice_h,
1176  dst2, dstStride2);
 /* Restore dst2[0] so the XYZ postprocess below sees the slice start. */
1177  if (scale_dst)
1178  dst2[0] += dstSliceY * dstStride2[0];
1179  } else {
1180  ret = ff_swscale(c, src2, srcStride2, srcSliceY_internal, srcSliceH,
1181  dst2, dstStride2, dstSliceY, dstSliceH);
1182  }
1183 
 /* XYZ output: convert the 'ret' freshly written RGB48 lines back to
  * XYZ12 in place. */
1184  if (c->dstXYZ && !(c->srcXYZ && sws->src_w==sws->dst_w && sws->src_h==sws->dst_h)) {
1185  uint8_t *dst;
1186 
1187  if (scale_dst) {
1188  dst = dst2[0];
1189  } else {
1190  int dstY = c->dstY ? c->dstY : srcSliceY + srcSliceH;
1191 
1192  av_assert0(dstY >= ret);
1193  av_assert0(ret >= 0);
1194  av_assert0(sws->dst_h >= dstY);
1195  dst = dst2[0] + (dstY - ret) * dstStride2[0];
1196  }
1197 
1198  /* replace on the same data */
1199  c->rgb48Toxyz12(c, dst, dstStride2[0], dst, dstStride2[0], sws->dst_w, ret);
1200  }
1201 
1202  /* reset slice direction at end of frame */
1203  if ((srcSliceY_internal + srcSliceH == sws->src_h) || scale_dst)
1204  c->sliceDir = 0;
1205 
1206  return ret;
1207 }
1208 
/*
 * End a frame submitted via sws_frame_start(): drop the references held
 * on the internal source/destination frames and clear the recorded input
 * slice ranges.  No-op unless the context was legacy-initialized.
 *
 * NOTE(review): the signature line ("void sws_frame_end(SwsContext *sws)",
 * original line 1209) appears to have been lost in extraction -- confirm
 * against upstream swscale.c.
 */
 1210 {
 1211  SwsInternal *c = sws_internal(sws);
 1212  if (!c->is_legacy_init)
 1213  return;
 1214  av_frame_unref(c->frame_src);
 1215  av_frame_unref(c->frame_dst);
 1216  c->src_ranges.nb_ranges = 0;
 1217 }
1218 
/*
 * Allocate per-plane buffers for a software destination frame from the
 * context's frame pool, filling in frame->linesize, frame->buf and
 * frame->data.  Asserts that the frame has no hw_frames_ctx.
 * Returns 0 on success or AVERROR(ENOMEM).
 *
 * NOTE(review): extraction lost the signature line (original line 1219,
 * presumably "static int frame_alloc_buffers(SwsContext *sws,
 * AVFrame *frame)") and the statement inside the failure branch
 * (original line 1230, presumably av_frame_unref(frame);) -- confirm
 * against upstream swscale.c.
 */
 1220 {
 1221  SwsInternal *c = sws_internal(sws);
 1222  FFFramePool *pool = &c->frame_pool;
 1223 
 1224  av_assert0(!frame->hw_frames_ctx);
 1225  const int nb_planes = av_pix_fmt_count_planes(frame->format);
 1226  for (int i = 0; i < nb_planes; i++) {
 1227  frame->linesize[i] = pool->linesize[i];
 1228  frame->buf[i] = av_buffer_pool_get(pool->pools[i]);
 1229  if (!frame->buf[i]) {
 1231  return AVERROR(ENOMEM);
 1232  }
 1233  frame->data[i] = frame->buf[i]->data;
 1234  }
 1235 
 1236  return 0;
 1237 }
1238 
/*
 * Begin scaling a frame pair for the slice API: reference the source
 * frame, allocate destination buffers if the caller did not provide any,
 * and reference the destination.  Only valid for legacy-initialized
 * contexts.  Returns 0 or a negative AVERROR code.
 *
 * NOTE(review): extraction lost the signature line (original line 1239,
 * presumably "int sws_frame_start(SwsContext *sws, AVFrame *dst,
 * const AVFrame *src)") and the statement under "if (allocated)"
 * (original line 1264, presumably av_frame_unref(dst);) -- confirm
 * against upstream swscale.c.
 */
 1240 {
 1241  SwsInternal *c = sws_internal(sws);
 1242  int ret, allocated = 0;
 1243  if (!c->is_legacy_init)
 1244  return AVERROR(EINVAL);
 1245 
 1246  ret = av_frame_ref(c->frame_src, src);
 1247  if (ret < 0)
 1248  return ret;
 1249 
 /* No caller-provided buffers: allocate the destination from the
  * context's configured output geometry/format. */
 1250  if (!dst->buf[0]) {
 1251  dst->width = sws->dst_w;
 1252  dst->height = sws->dst_h;
 1253  dst->format = sws->dst_format;
 1254 
 1255  ret = av_frame_get_buffer(dst, 0);
 1256  if (ret < 0)
 1257  return ret;
 1258  allocated = 1;
 1259  }
 1260 
 1261  ret = av_frame_ref(c->frame_dst, dst);
 1262  if (ret < 0) {
 1263  if (allocated)
 1265 
 1266  return ret;
 1267  }
 1268 
 1269  return 0;
 1270 }
1271 
1272 int sws_send_slice(SwsContext *sws, unsigned int slice_start,
1273  unsigned int slice_height)
1274 {
1275  SwsInternal *c = sws_internal(sws);
1276  int ret;
1277  if (!c->is_legacy_init)
1278  return AVERROR(EINVAL);
1279 
1280  ret = ff_range_add(&c->src_ranges, slice_start, slice_height);
1281  if (ret < 0)
1282  return ret;
1283 
1284  return 0;
1285 }
1286 
1287 unsigned int sws_receive_slice_alignment(const SwsContext *sws)
1288 {
1289  SwsInternal *c = sws_internal(sws);
1290  if (c->slice_ctx)
1291  return sws_internal(c->slice_ctx[0])->dst_slice_align;
1292 
1293  return c->dst_slice_align;
1294 }
1295 
/*
 * Produce one horizontal slice of output, once the complete input frame
 * has been received via sws_send_slice().  Runs either the slice-threaded
 * path (dispatching to ff_sws_slice_worker) or a direct scale_internal()
 * call.  Returns 0/lines on success, AVERROR(EAGAIN) while input is
 * incomplete, or another negative AVERROR code.
 *
 * NOTE(review): extraction lost the signature line (original line 1296,
 * "int sws_receive_slice(SwsContext *sws, unsigned int slice_start,")
 * and the av_log opener of the misalignment warning (original line 1313,
 * presumably "av_log(c, AV_LOG_WARNING,") -- confirm against upstream.
 */
 1297  unsigned int slice_height)
 1298 {
 1299  SwsInternal *c = sws_internal(sws);
 1300  unsigned int align = sws_receive_slice_alignment(sws);
 1301  uint8_t *dst[4];
 1302  if (!c->is_legacy_init)
 1303  return AVERROR(EINVAL);
 1304 
 1305  /* wait until complete input has been received */
 1306  if (!(c->src_ranges.nb_ranges == 1 &&
 1307  c->src_ranges.ranges[0].start == 0 &&
 1308  c->src_ranges.ranges[0].len == sws->src_h))
 1309  return AVERROR(EAGAIN);
 1310 
 /* Partial-output requests must respect the slice alignment. */
 1311  if ((slice_start > 0 || slice_height < sws->dst_h) &&
 1312  (slice_start % align || slice_height % align)) {
 1314  "Incorrectly aligned output: %u/%u not multiples of %u\n",
 1315  slice_start, slice_height, align);
 1316  return AVERROR(EINVAL);
 1317  }
 1318 
 /* Threaded path: each worker scales one sub-slice; error-diffusion
  * dithering cannot be split, so force a single job in that case. */
 1319  if (c->slicethread) {
 1320  int nb_jobs = c->nb_slice_ctx;
 1321  int ret = 0;
 1322 
 1323  if (c->slice_ctx[0]->dither == SWS_DITHER_ED)
 1324  nb_jobs = 1;
 1325 
 1326  c->dst_slice_start = slice_start;
 1327  c->dst_slice_height = slice_height;
 1328 
 1329  avpriv_slicethread_execute(c->slicethread, nb_jobs, 0);
 1330 
 /* Report the first worker error, then clear all error slots. */
 1331  for (int i = 0; i < c->nb_slice_ctx; i++) {
 1332  if (c->slice_err[i] < 0) {
 1333  ret = c->slice_err[i];
 1334  break;
 1335  }
 1336  }
 1337 
 1338  memset(c->slice_err, 0, c->nb_slice_ctx * sizeof(*c->slice_err));
 1339 
 1340  return ret;
 1341  }
 1342 
 /* Single-threaded path: offset each plane pointer to the slice start.
  * NOTE(review): every plane, including luma, is shifted by
  * chrDstVSubSample here, unlike the per-plane vshift used in
  * ff_sws_slice_worker -- verify against upstream that this is intended. */
 1343  for (int i = 0; i < FF_ARRAY_ELEMS(dst); i++) {
 1344  ptrdiff_t offset = c->frame_dst->linesize[i] * (ptrdiff_t)(slice_start >> c->chrDstVSubSample);
 1345  dst[i] = FF_PTR_ADD(c->frame_dst->data[i], offset);
 1346  }
 1347 
 1348  return scale_internal(sws, (const uint8_t * const *)c->frame_src->data,
 1349  c->frame_src->linesize, 0, sws->src_h,
 1350  dst, c->frame_dst->linesize, slice_start, slice_height);
 1351 }
1352 
1353 /* Subset of av_frame_ref() that only references (video) data buffers */
1354 static int frame_ref(AVFrame *dst, const AVFrame *src)
1355 {
1356  /* ref the buffers */
1357  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
1358  if (!src->buf[i])
1359  break;
1360  dst->buf[i] = av_buffer_ref(src->buf[i]);
1361  if (!dst->buf[i])
1362  return AVERROR(ENOMEM);
1363  }
1364 
1365  memcpy(dst->data, src->data, sizeof(src->data));
1366  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
1367  return 0;
1368 }
1369 
1371 {
1372  int ret, allocated = 0;
1373  SwsInternal *c = sws_internal(sws);
1374  if (!src || !dst)
1375  return AVERROR(EINVAL);
1376 
1377  if (c->is_legacy_init) {
1378  /* Context has been initialized with explicit values, fall back to
1379  * legacy API behavior. */
1380  ret = sws_frame_start(sws, dst, src);
1381  if (ret < 0)
1382  return ret;
1383 
1384  ret = sws_send_slice(sws, 0, src->height);
1385  if (ret >= 0)
1386  ret = sws_receive_slice(sws, 0, dst->height);
1387 
1388  sws_frame_end(sws);
1389 
1390  return ret;
1391  }
1392 
1393  ret = sws_frame_setup(sws, dst, src);
1394  if (ret < 0)
1395  return ret;
1396 
1397  if (!src->data[0])
1398  return 0;
1399 
1400  const SwsGraph *top = c->graph[FIELD_TOP];
1401  const SwsGraph *bot = c->graph[FIELD_BOTTOM];
1402  if (dst->data[0]) /* user-provided buffers */
1403  goto process_frame;
1404 
1405  /* Sanity */
1406  memset(dst->buf, 0, sizeof(dst->buf));
1407  memset(dst->data, 0, sizeof(dst->data));
1408  memset(dst->linesize, 0, sizeof(dst->linesize));
1409  dst->extended_data = dst->data;
1410 
1411  if (src->buf[0] && top->noop && (!bot || bot->noop))
1412  return frame_ref(dst, src);
1413 
1414  ret = frame_alloc_buffers(sws, dst);
1415  if (ret < 0)
1416  return ret;
1417  allocated = 1;
1418 
1420  for (int field = 0; field < (bot ? 2 : 1); field++) {
1421  ret = ff_sws_graph_run(c->graph[field], dst, src);
1422  if (ret < 0) {
1423  if (allocated)
1425  return ret;
1426  }
1427  }
1428 
1429  return 0;
1430 }
1431 
/*
 * Validate the user-settable option fields of the context, logging and
 * returning AVERROR(EINVAL) for any value outside its allowed range.
 *
 * NOTE(review): the signature line (original line 1432,
 * "static int validate_params(SwsContext *ctx)") appears to have been
 * lost in extraction -- confirm against upstream swscale.c.
 */
 1433 {
 /* Range-check one field; logs and returns on violation. */
 1434 #define VALIDATE(field, min, max) \
 1435  if (ctx->field < min || ctx->field > max) { \
 1436  av_log(ctx, AV_LOG_ERROR, "'%s' (%d) out of range [%d, %d]\n", \
 1437  #field, (int) ctx->field, min, max); \
 1438  return AVERROR(EINVAL); \
 1439  }
 1440 
 1441  VALIDATE(threads, 0, SWS_MAX_THREADS);
 1442  VALIDATE(dither, 0, SWS_DITHER_NB - 1)
 1443  VALIDATE(alpha_blend, 0, SWS_ALPHA_BLEND_NB - 1)
 1444  return 0;
 1445 }
1446 
/*
 * Like sws_scale_frame(), but only validates the frame pair and
 * (re)initializes the scaling graph(s) without actually scaling:
 * checks parameters, handles hardware (Vulkan-only) frame constraints,
 * builds one graph per field (two for interlaced content), and reinits
 * the output frame pool for software destinations.
 *
 * NOTE(review): extraction lost the signature line (original line 1447,
 * "int sws_frame_setup(SwsContext *ctx, const AVFrame *dst,
 * const AVFrame *src)"), original line 1449 (presumably
 * "SwsInternal *s = sws_internal(ctx);" -- 's' is used below) and
 * original lines 1538-1541 (the format/colorspace name arguments of the
 * av_log call in the fail path) -- confirm against upstream swscale.c.
 */
 1448 {
 1450  const char *err_msg;
 1451  int ret;
 1452 
 1453  if (!src || !dst)
 1454  return AVERROR(EINVAL);
 1455  if ((ret = validate_params(ctx)) < 0)
 1456  return ret;
 1457 
 1458  /* For now, if a single frame has a context, then both need a context */
 1459  if (!!src->hw_frames_ctx != !!dst->hw_frames_ctx) {
 1460  return AVERROR(ENOTSUP);
 1461  } else if (!!src->hw_frames_ctx) {
 1462  /* Both hardware frames must already be allocated */
 1463  if (!src->data[0] || !dst->data[0])
 1464  return AVERROR(EINVAL);
 1465 
 1466  AVHWFramesContext *src_hwfc, *dst_hwfc;
 1467  src_hwfc = (AVHWFramesContext *)src->hw_frames_ctx->data;
 1468  dst_hwfc = (AVHWFramesContext *)dst->hw_frames_ctx->data;
 1469 
 1470  /* Both frames must live on the same device */
 1471  if (src_hwfc->device_ref->data != dst_hwfc->device_ref->data)
 1472  return AVERROR(EINVAL);
 1473 
 1474  /* Only Vulkan devices are supported */
 1475  AVHWDeviceContext *dev_ctx;
 1476  dev_ctx = (AVHWDeviceContext *)src_hwfc->device_ref->data;
 1477  if (dev_ctx->type != AV_HWDEVICE_TYPE_VULKAN)
 1478  return AVERROR(ENOTSUP);
 1479 
 1480 #if CONFIG_UNSTABLE && CONFIG_VULKAN
 1481  ret = ff_sws_vk_init(ctx, src_hwfc->device_ref);
 1482  if (ret < 0)
 1483  return ret;
 1484 #endif
 1485  }
 1486 
 /* Build (or reuse) one scaling graph per field.  dst_width tracks the
  * widest aligned output width any pass requires, for pool sizing. */
 1487  int dst_width = dst->width;
 1488  for (int field = 0; field < 2; field++) {
 1489  SwsFormat src_fmt = ff_fmt_from_frame(src, field);
 1490  SwsFormat dst_fmt = ff_fmt_from_frame(dst, field);
 1491  int src_ok, dst_ok;
 1492 
 1493  if ((src->flags ^ dst->flags) & AV_FRAME_FLAG_INTERLACED) {
 1494  err_msg = "Cannot convert interlaced to progressive frames or vice versa.\n";
 1495  ret = AVERROR(EINVAL);
 1496  goto fail;
 1497  }
 1498 
 /* Unsupported formats are tolerated only for identical in/out props
  * (pure passthrough). */
 1499  src_ok = ff_test_fmt(&src_fmt, 0);
 1500  dst_ok = ff_test_fmt(&dst_fmt, 1);
 1501  if ((!src_ok || !dst_ok) && !ff_props_equal(&src_fmt, &dst_fmt)) {
 1502  err_msg = src_ok ? "Unsupported output" : "Unsupported input";
 1503  ret = AVERROR(ENOTSUP);
 1504  goto fail;
 1505  }
 1506 
 1507  ret = ff_sws_graph_reinit(ctx, &dst_fmt, &src_fmt, field, &s->graph[field]);
 1508  if (ret < 0) {
 1509  err_msg = "Failed initializing scaling graph";
 1510  goto fail;
 1511  }
 1512 
 1513  const SwsGraph *graph = s->graph[field];
 1514  if (graph->incomplete && ctx->flags & SWS_STRICT) {
 1515  err_msg = "Incomplete scaling graph";
 1516  ret = AVERROR(EINVAL);
 1517  goto fail;
 1518  }
 1519 
 1520  if (!graph->noop) {
 1521  av_assert0(graph->num_passes);
 1522  const SwsPass *last_pass = graph->passes[graph->num_passes - 1];
 1523  const int aligned_w = ff_sws_pass_aligned_width(last_pass, dst->width);
 1524  dst_width = FFMAX(dst_width, aligned_w);
 1525  }
 1526 
 /* Progressive input: a single graph suffices; drop any stale bottom-
  * field graph and stop. */
 1527  if (!src_fmt.interlaced) {
 1528  ff_sws_graph_free(&s->graph[FIELD_BOTTOM]);
 1529  break;
 1530  }
 1531 
 1532  continue;
 1533 
 1534  fail:
 1535  av_log(ctx, AV_LOG_ERROR, "%s (%s): fmt:%s csp:%s prim:%s trc:%s ->"
 1536  " fmt:%s csp:%s prim:%s trc:%s\n",
 1537  err_msg, av_err2str(ret),
 1542 
 1543  for (int i = 0; i < FF_ARRAY_ELEMS(s->graph); i++)
 1544  ff_sws_graph_free(&s->graph[i]);
 1545 
 1546  return ret;
 1547  }
 1548 
 /* Software destination: (re)size the output frame pool to the widest
  * aligned width any graph pass needs. */
 1549  if (!dst->hw_frames_ctx) {
 1550  ret = ff_frame_pool_video_reinit(&s->frame_pool, dst_width, dst->height,
 1551  dst->format, av_cpu_max_align());
 1552  if (ret < 0)
 1553  return ret;
 1554  }
 1555 
 1556  return 0;
 1557 }
1558 
 1559 /**
 1560  * swscale wrapper, so we don't need to export the SwsContext.
 1561  * Assumes planar YUV to be in YUV order instead of YVU.
 1562  *
 * NOTE(review): the signature line (original line 1563, presumably
 * "int sws_scale(SwsContext *sws,") appears to have been lost in
 * extraction -- confirm against upstream swscale.c.
 1562  */
 1564  const uint8_t * const srcSlice[],
 1565  const int srcStride[], int srcSliceY,
 1566  int srcSliceH, uint8_t *const dst[],
 1567  const int dstStride[])
 1568 {
 1569  SwsInternal *c = sws_internal(sws);
 1570  if (!c->is_legacy_init)
 1571  return AVERROR(EINVAL);
 1572 
 /* With slice threading configured, run on the first slice context. */
 1573  if (c->nb_slice_ctx) {
 1574  sws = c->slice_ctx[0];
 1575  c = sws_internal(sws);
 1576  }
 1577 
 /* Legacy API always produces the full destination height. */
 1578  return scale_internal(sws, srcSlice, srcStride, srcSliceY, srcSliceH,
 1579  dst, dstStride, 0, sws->dst_h);
 1580 }
1581 
/*
 * Slice-thread worker: computes this job's output sub-slice from the
 * parent's dst_slice_start/dst_slice_height, offsets each destination
 * plane pointer accordingly (luma/alpha unshifted, chroma by the vertical
 * subsampling), runs scale_internal() on its own per-thread context and
 * stores the result in parent->slice_err[threadnr].
 *
 * NOTE(review): original line 1609 (the final arguments of the
 * scale_internal() call, presumably
 * "parent->dst_slice_start + slice_start, slice_end - slice_start);")
 * was lost in extraction -- confirm against upstream swscale.c.
 */
 1582 void ff_sws_slice_worker(void *priv, int jobnr, int threadnr,
 1583  int nb_jobs, int nb_threads)
 1584 {
 1585  SwsInternal *parent = priv;
 1586  SwsContext *sws = parent->slice_ctx[threadnr];
 1587  SwsInternal *c = sws_internal(sws);
 1588 
 /* Even split of the output rows across jobs, rounded up and aligned to
  * the per-context slice alignment; the last job may get less. */
 1589  const int slice_height = FFALIGN(FFMAX((parent->dst_slice_height + nb_jobs - 1) / nb_jobs, 1),
 1590  c->dst_slice_align);
 1591  const int slice_start = jobnr * slice_height;
 1592  const int slice_end = FFMIN((jobnr + 1) * slice_height, parent->dst_slice_height);
 1593  int err = 0;
 1594 
 1595  if (slice_end > slice_start) {
 1596  uint8_t *dst[4] = { NULL };
 1597 
 1598  for (int i = 0; i < FF_ARRAY_ELEMS(dst) && parent->frame_dst->data[i]; i++) {
 1599  const int vshift = (i == 1 || i == 2) ? c->chrDstVSubSample : 0;
 1600  const ptrdiff_t offset = parent->frame_dst->linesize[i] *
 1601  (ptrdiff_t)((slice_start + parent->dst_slice_start) >> vshift);
 1602 
 1603  dst[i] = parent->frame_dst->data[i] + offset;
 1604  }
 1605 
 1606  err = scale_internal(sws, (const uint8_t * const *)parent->frame_src->data,
 1607  parent->frame_src->linesize, 0, sws->src_h,
 1608  dst, parent->frame_dst->linesize,
 1610  }
 1611 
 1612  parent->slice_err[threadnr] = err;
 1613 }
sws_init_swscale
static av_cold void sws_init_swscale(SwsInternal *c)
Definition: swscale.c:662
isBayer
static av_always_inline int isBayer(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:860
flags
const SwsFlags flags[]
Definition: swscale.c:72
ff_test_fmt
int ff_test_fmt(const SwsFormat *fmt, int output)
Definition: format.c:614
_dst
uint8_t * _dst
Definition: dsp.h:56
yuv2planar1_fn
void(* yuv2planar1_fn)(const int16_t *src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output without any additional vertical scaling (...
Definition: swscale_internal.h:126
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:155
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
SwsPlane::sliceH
int sliceH
number of lines
Definition: swscale_internal.h:1109
SwsPass
Represents a single filter pass in the scaling graph.
Definition: graph.h:75
ff_props_equal
static int ff_props_equal(const SwsFormat *fmt1, const SwsFormat *fmt2)
Definition: format.h:124
isPacked
static av_always_inline int isPacked(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:905
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
SwsGraph::passes
SwsPass ** passes
Sorted sequence of filter passes to apply.
Definition: graph.h:131
mem_internal.h
ff_fmt_from_frame
SwsFormat ff_fmt_from_frame(const AVFrame *frame, int field)
This function also sanitizes and strips the input data, removing irrelevant fields for certain format...
Definition: format.c:349
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:513
SwsFormat::interlaced
int interlaced
Definition: format.h:79
lumRangeToJpeg16_c
static void lumRangeToJpeg16_c(int16_t *_dst, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:237
lumRangeToJpeg_c
static void lumRangeToJpeg_c(int16_t *dst, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:189
ff_sws_init_range_convert_aarch64
av_cold void ff_sws_init_range_convert_aarch64(SwsInternal *c)
Definition: swscale.c:314
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1886
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
SwsContext::src_w
int src_w
Deprecated frame property overrides, for the legacy API only.
Definition: swscale.h:253
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
process_frame
static av_always_inline int process_frame(AVTextFormatContext *tfc, InputFile *ifile, AVFrame *frame, const AVPacket *pkt, int *packet_new)
Definition: ffprobe.c:1560
ff_sws_graph_reinit
int ff_sws_graph_reinit(SwsContext *ctx, const SwsFormat *dst, const SwsFormat *src, int field, SwsGraph **out_graph)
Wrapper around ff_sws_graph_create() that reuses the existing graph if the format is compatible.
Definition: graph.c:938
ff_rotate_slice
int ff_rotate_slice(SwsSlice *s, int lum, int chr)
Definition: slice.c:120
int64_t
long long int64_t
Definition: coverity.c:34
AV_PIX_FMT_FLAG_FLOAT
#define AV_PIX_FMT_FLAG_FLOAT
The pixel format contains IEEE-754 floating point values.
Definition: pixdesc.h:158
SwsSlice::plane
SwsPlane plane[MAX_SLICE_PLANES]
color planes
Definition: swscale_internal.h:1127
avpriv_slicethread_execute
void avpriv_slicethread_execute(AVSliceThread *ctx, int nb_jobs, int execute_main)
Execute slice threading.
Definition: slicethread.c:270
ff_sws_init_range_convert_loongarch
av_cold void ff_sws_init_range_convert_loongarch(SwsInternal *c)
Definition: swscale_init_loongarch.c:27
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:435
pixdesc.h
RV_IDX
#define RV_IDX
Definition: swscale_internal.h:473
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
isGray
static av_always_inline int isGray(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:805
RU_IDX
#define RU_IDX
Definition: swscale_internal.h:470
SWS_BITEXACT
@ SWS_BITEXACT
Definition: swscale.h:157
b
#define b
Definition: input.c:43
SwsFilterDescriptor
Struct which holds all necessary data for processing a slice.
Definition: swscale_internal.h:1134
yuv2planeX
static void FUNC() yuv2planeX(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Definition: swscale_ppc_template.c:84
data
const char data[16]
Definition: mxf.c:149
GV_IDX
#define GV_IDX
Definition: swscale_internal.h:474
AV_PIX_FMT_MONOWHITE
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:82
rgb2yuv
static const char rgb2yuv[]
Definition: vf_scale_vulkan.c:86
BV_IDX
#define BV_IDX
Definition: swscale_internal.h:475
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:512
SwsContext::flags
unsigned flags
Bitmask of SWS_*.
Definition: swscale.h:219
base
uint8_t base
Definition: vp3data.h:128
filter
void(* filter)(uint8_t *src, int stride, int qscale)
Definition: h263dsp.c:29
sws_receive_slice
int sws_receive_slice(SwsContext *sws, unsigned int slice_start, unsigned int slice_height)
Request a horizontal slice of the output data to be written into the frame previously provided to sws...
Definition: swscale.c:1296
FIELD_TOP
@ FIELD_TOP
Definition: format.h:56
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
ff_sws_init_swscale_riscv
av_cold void ff_sws_init_swscale_riscv(SwsInternal *c)
Definition: swscale.c:74
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_get_cpu_flags
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
Definition: cpu.c:109
DEBUG_BUFFERS
#define DEBUG_BUFFERS(...)
Definition: swscale.c:259
bit_depth
static void bit_depth(AudioStatsContext *s, const uint64_t *const mask, uint8_t *depth)
Definition: af_astats.c:246
cpu_flags
static atomic_int cpu_flags
Definition: cpu.c:56
hScale16To15_c
static void hScale16To15_c(SwsInternal *c, int16_t *dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:99
_src
uint8_t ptrdiff_t const uint8_t * _src
Definition: dsp.h:56
SwsInternal::frame_dst
AVFrame * frame_dst
Definition: swscale_internal.h:376
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:456
AV_HWDEVICE_TYPE_VULKAN
@ AV_HWDEVICE_TYPE_VULKAN
Definition: hwcontext.h:39
SWS_FAST_BILINEAR
@ SWS_FAST_BILINEAR
Scaler selection options.
Definition: swscale.h:176
is16BPS
static av_always_inline int is16BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:745
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3496
ff_sws_init_input_funcs
void ff_sws_init_input_funcs(SwsInternal *c, planar1_YV12_fn *lumToYV12, planar1_YV12_fn *alpToYV12, planar2_YV12_fn *chrToYV12, planarX_YV12_fn *readLumPlanar, planarX_YV12_fn *readAlpPlanar, planarX2_YV12_fn *readChrPlanar)
validate_params
static int validate_params(SwsContext *ctx)
Definition: swscale.c:1432
chrRangeToJpeg16_c
static void chrRangeToJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:211
FFFramePool
Frame pool.
Definition: framepool.h:32
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
Handle slice ends.
Definition: mpeg12dec.c:1693
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3856
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
chrRangeFromJpeg_c
static void chrRangeFromJpeg_c(int16_t *dstU, int16_t *dstV, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:177
SWS_MAX_THREADS
#define SWS_MAX_THREADS
Definition: swscale_internal.h:57
fail
#define fail()
Definition: checkasm.h:224
chrRangeFromJpeg16_c
static void chrRangeFromJpeg16_c(int16_t *_dstU, int16_t *_dstV, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:225
SwsInternal::frame_src
AVFrame * frame_src
Definition: swscale_internal.h:375
sws_frame_setup
int sws_frame_setup(SwsContext *ctx, const AVFrame *dst, const AVFrame *src)
Like sws_scale_frame, but without actually scaling.
Definition: swscale.c:1447
ff_sws_init_xyzdsp_aarch64
av_cold void ff_sws_init_xyzdsp_aarch64(SwsInternal *c)
Definition: swscale.c:339
val
static double val(void *priv, double ch)
Definition: aeval.c:77
SWS_ALPHA_BLEND_NB
@ SWS_ALPHA_BLEND_NB
Definition: swscale.h:92
isNBPS
static av_always_inline int isNBPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:759
init_range_convert_constants
static void init_range_convert_constants(SwsInternal *c)
Definition: swscale.c:591
SwsColor::trc
enum AVColorTransferCharacteristic trc
Definition: format.h:62
AVHWDeviceContext
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:63
AV_PIX_FMT_BGR8
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
Definition: pixfmt.h:90
avassert.h
hScale8To19_c
static void hScale8To19_c(SwsInternal *c, int16_t *_dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:144
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:119
yuv2packed2_fn
void(* yuv2packed2_fn)(SwsInternal *c, const int16_t *lumSrc[2], const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc[2], uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing bilinear scalin...
Definition: swscale_internal.h:232
SwsContext::dither
SwsDither dither
Dither mode.
Definition: swscale.h:235
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:390
SwsInternal::slice_err
int * slice_err
Definition: swscale_internal.h:344
emms_c
#define emms_c()
Definition: emms.h:89
ff_sws_vk_init
int ff_sws_vk_init(SwsContext *sws, AVBufferRef *dev_ref)
Definition: ops.c:44
intreadwrite.h
dither
static const uint16_t dither[8][8]
Definition: vf_gradfun.c:46
SwsInternal::slice_ctx
SwsContext ** slice_ctx
Definition: swscale_internal.h:343
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_update_palette
void ff_update_palette(SwsInternal *c, const uint32_t *pal)
Definition: swscale.c:873
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
ff_sws_init_swscale_arm
av_cold void ff_sws_init_swscale_arm(SwsInternal *c)
Definition: swscale.c:33
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1414
g
const char * g
Definition: vf_curves.c:128
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
ops.h
SwsSlice::width
int width
Slice line width.
Definition: swscale_internal.h:1121
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:42
VALIDATE
#define VALIDATE(field, min, max)
SwsGraph::num_passes
int num_passes
Definition: graph.h:132
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
chrRangeToJpeg_c
static void chrRangeToJpeg_c(int16_t *dstU, int16_t *dstV, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:163
ff_hcscale_fast_c
void ff_hcscale_fast_c(SwsInternal *c, int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:38
frame_alloc_buffers
static int frame_alloc_buffers(SwsContext *sws, AVFrame *frame)
Definition: swscale.c:1219
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_sws_init_range_convert_riscv
av_cold void ff_sws_init_range_convert_riscv(SwsInternal *c)
Definition: swscale.c:29
FFFramePool::pools
AVBufferPool * pools[4]
Definition: framepool.h:52
GY_IDX
#define GY_IDX
Definition: swscale_internal.h:468
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:514
sws_frame_end
void sws_frame_end(SwsContext *sws)
Finish the scaling process for a pair of source/destination frames previously submitted with sws_fram...
Definition: swscale.c:1209
FIELD_BOTTOM
@ FIELD_BOTTOM
Definition: format.h:57
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_sws_init_range_convert_x86
av_cold void ff_sws_init_range_convert_x86(SwsInternal *c)
Definition: swscale.c:470
AV_PIX_FMT_GRAY8A
@ AV_PIX_FMT_GRAY8A
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:143
scale_internal
static int scale_internal(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:1012
fillPlane
static av_always_inline void fillPlane(uint8_t *plane, int stride, int width, int height, int y, uint8_t val)
Definition: swscale.c:58
NULL
#define NULL
Definition: coverity.c:32
rgb48Toxyz12_c
static void rgb48Toxyz12_c(const SwsInternal *c, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int w, int h)
Definition: swscale.c:804
format
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image format
Definition: swscale-v2.txt:14
SwsPlane::available_lines
int available_lines
max number of lines that can be hold by this plane
Definition: swscale_internal.h:1107
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
SwsContext::gamma_flag
int gamma_flag
Use gamma correct scaling.
Definition: swscale.h:245
AVHWFramesContext::device_ref
AVBufferRef * device_ref
A reference to the parent AVHWDeviceContext.
Definition: hwcontext.h:129
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
V
#define V
Definition: avdct.c:32
FF_PTR_ADD
#define FF_PTR_ADD(ptr, off)
Definition: internal.h:80
av_color_primaries_name
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:3790
av_cpu_max_align
size_t av_cpu_max_align(void)
Get the maximum data alignment that may be required by FFmpeg.
Definition: cpu.c:287
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
hScale8To15_c
static void hScale8To15_c(SwsInternal *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:128
ff_sws_init_range_convert
av_cold void ff_sws_init_range_convert(SwsInternal *c)
Definition: swscale.c:626
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
hScale16To19_c
static void hScale16To19_c(SwsInternal *c, int16_t *_dst, int dstW, const uint8_t *_src, const int16_t *filter, const int32_t *filterPos, int filterSize)
Definition: swscale.c:69
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
SwsInternal::dstY
int dstY
Last destination vertical line output from last slice.
Definition: swscale_internal.h:458
ff_sws_init_xyzdsp
av_cold void ff_sws_init_xyzdsp(SwsInternal *c)
Definition: swscale.c:863
ff_sws_pass_aligned_width
int ff_sws_pass_aligned_width(const SwsPass *pass, int width)
Align width to the optimal size for pass.
Definition: graph.c:41
AV_PIX_FMT_BGR4_BYTE
@ AV_PIX_FMT_BGR4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
Definition: pixfmt.h:92
ff_range_add
int ff_range_add(RangeList *r, unsigned int start, unsigned int len)
Definition: utils.c:2432
attribute_align_arg
#define attribute_align_arg
Definition: internal.h:50
yuv2packedX_fn
void(* yuv2packedX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output by doing multi-point ver...
Definition: swscale_internal.h:264
AV_CPU_FLAG_SSE2
#define AV_CPU_FLAG_SSE2
PIV SSE2 functions.
Definition: cpu.h:37
ff_sws_graph_free
void ff_sws_graph_free(SwsGraph **pgraph)
Uninitialize any state associate with this filter graph and free it.
Definition: graph.c:903
ff_sws_slice_worker
void ff_sws_slice_worker(void *priv, int jobnr, int threadnr, int nb_jobs, int nb_threads)
Definition: swscale.c:1582
isBE
static av_always_inline int isBE(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:766
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
RY_IDX
#define RY_IDX
Definition: swscale_internal.h:467
ff_sws_init_swscale_loongarch
av_cold void ff_sws_init_swscale_loongarch(SwsInternal *c)
Definition: swscale_init_loongarch.c:62
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
fillPlane16
static void fillPlane16(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian)
Definition: swscale_internal.h:1067
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
usePal
static av_always_inline int usePal(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:936
cpu.h
i
#define i(width, name, range_min, range_max)
Definition: cbs_h264.c:63
isAnyRGB
static av_always_inline int isAnyRGB(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:874
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
SwsContext::src_h
int src_h
Width and height of the source frame.
Definition: swscale.h:253
SwsFormat
Definition: format.h:77
xyz12Torgb48_c
static void xyz12Torgb48_c(const SwsInternal *c, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int w, int h)
Definition: swscale.c:745
align
static const uint8_t *BS_FUNC() align(BSCTX *bc)
Skip bits to a byte boundary.
Definition: bitstream_template.h:419
is32BPS
static av_always_inline int is32BPS(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:752
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:408
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:511
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
lumRangeFromJpeg_c
static void lumRangeFromJpeg_c(int16_t *dst, int width, uint32_t _coeff, int64_t _offset)
Definition: swscale.c:201
SWS_DITHER_NB
@ SWS_DITHER_NB
Definition: swscale.h:86
ff_sws_init_swscale_ppc
av_cold void ff_sws_init_swscale_ppc(SwsInternal *c)
Definition: swscale_altivec.c:233
SwsContext::dst_format
int dst_format
Destination pixel format.
Definition: swscale.h:256
fillPlane32
static void fillPlane32(uint8_t *plane, int stride, int width, int height, int y, int alpha, int bits, const int big_endian, int is_float)
Definition: swscale_internal.h:1081
Y
#define Y
Definition: boxblur.h:37
yuv2anyX_fn
void(* yuv2anyX_fn)(SwsInternal *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int16_t **chrUSrc, const int16_t **chrVSrc, int chrFilterSize, const int16_t **alpSrc, uint8_t **dest, int dstW, int y)
Write one line of horizontally scaled Y/U/V/A to YUV/RGB output by doing multi-point vertical scaling...
Definition: swscale_internal.h:298
BY_IDX
#define BY_IDX
Definition: swscale_internal.h:469
ff_sws_init_swscale_x86
av_cold void ff_sws_init_swscale_x86(SwsInternal *c)
Definition: swscale.c:486
scale_cascaded
static int scale_cascaded(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:993
emms.h
ff_updateMMXDitherTables
void ff_updateMMXDitherTables(SwsInternal *c, int dstY)
Definition: swscale.c:60
SwsInternal::dst_slice_align
unsigned int dst_slice_align
Definition: swscale_internal.h:692
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
SwsFormat::format
enum AVPixelFormat format
Definition: format.h:80
sws_send_slice
int sws_send_slice(SwsContext *sws, unsigned int slice_start, unsigned int slice_height)
Indicate that a horizontal slice of input data is available in the source frame previously provided t...
Definition: swscale.c:1272
ff_sws_init_scale
void ff_sws_init_scale(SwsInternal *c)
Definition: swscale.c:697
src2
const pixel * src2
Definition: h264pred_template.c:421
common.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:58
check_image_pointers
static int check_image_pointers(const uint8_t *const data[4], enum AVPixelFormat pix_fmt, const int linesizes[4])
Definition: swscale.c:728
av_always_inline
#define av_always_inline
Definition: attributes.h:76
swscale_internal.h
yuv2interleavedX_fn
void(* yuv2interleavedX_fn)(enum AVPixelFormat dstFormat, const uint8_t *chrDither, const int16_t *chrFilter, int chrFilterSize, const int16_t **chrUSrc, const int16_t **chrVSrc, uint8_t *dest, int dstW)
Write one line of horizontally scaled chroma to interleaved output with multi-point vertical scaling ...
Definition: swscale_internal.h:162
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AV_PIX_FMT_FLAG_BE
#define AV_PIX_FMT_FLAG_BE
Pixel format is big-endian.
Definition: pixdesc.h:116
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
SwsContext::dst_h
int dst_h
Width and height of the destination frame.
Definition: swscale.h:254
AV_PIX_FMT_RGB4_BYTE
@ AV_PIX_FMT_RGB4_BYTE
packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
Definition: pixfmt.h:95
SwsSlice
Struct which defines a slice of an image to be scaled or an output for a scaled slice.
Definition: swscale_internal.h:1119
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:658
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: dec.c:844
ff_init_slice_from_src
int ff_init_slice_from_src(SwsSlice *s, uint8_t *const src[4], const int stride[4], int srcW, int lumY, int lumH, int chrY, int chrH, int relative)
Definition: slice.c:148
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:118
frame_ref
static int frame_ref(AVFrame *dst, const AVFrame *src)
Definition: swscale.c:1354
SWS_DITHER_ED
@ SWS_DITHER_ED
Definition: swscale.h:83
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
yuv2packed1_fn
void(* yuv2packed1_fn)(SwsInternal *c, const int16_t *lumSrc, const int16_t *chrUSrc[2], const int16_t *chrVSrc[2], const int16_t *alpSrc, uint8_t *dest, int dstW, int uvalpha, int y)
Write one line of horizontally scaled Y/U/V/A to packed-pixel YUV/RGB output without any additional v...
Definition: swscale_internal.h:199
SwsInternal
Definition: swscale_internal.h:335
ret
ret
Definition: filter_design.txt:187
sws_receive_slice_alignment
unsigned int sws_receive_slice_alignment(const SwsContext *sws)
Get the alignment required for slices.
Definition: swscale.c:1287
__asm__
__asm__(".macro parse_r var r\n\t" "\\var = -1\n\t" _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3) _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7) _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11) _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15) _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19) _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31) ".iflt \\var\n\t" ".error \"Unable to parse register name \\r\"\n\t" ".endif\n\t" ".endm")
AVHWDeviceContext::type
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:75
bswap.h
sws_frame_start
int sws_frame_start(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Initialize the scaling process for a given pair of source/destination frames.
Definition: swscale.c:1239
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
sws_pb_64
static const uint8_t sws_pb_64[8]
Definition: swscale.c:54
U
#define U(x)
Definition: vpx_arith.h:37
yuv2planarX_fn
void(* yuv2planarX_fn)(const int16_t *filter, int filterSize, const int16_t **src, uint8_t *dest, int dstW, const uint8_t *dither, int offset)
Write one line of horizontally scaled data to planar output with multi-point vertical scaling between...
Definition: swscale_internal.h:142
SwsGraph::noop
bool noop
Definition: graph.h:126
reset_ptr
static void reset_ptr(const uint8_t *src[], enum AVPixelFormat format)
Definition: swscale.c:716
ff_init_vscale_pfn
void ff_init_vscale_pfn(SwsInternal *c, yuv2planar1_fn yuv2plane1, yuv2planarX_fn yuv2planeX, yuv2interleavedX_fn yuv2nv12cX, yuv2packed1_fn yuv2packed1, yuv2packed2_fn yuv2packed2, yuv2packedX_fn yuv2packedX, yuv2anyX_fn yuv2anyX, int use_mmx)
setup vertical scaler functions
Definition: vscale.c:258
sws_scale
int attribute_align_arg sws_scale(SwsContext *sws, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1563
FFFramePool::linesize
int linesize[4]
Definition: framepool.h:51
SWS_PRINT_INFO
@ SWS_PRINT_INFO
Emit verbose log of scaling parameters.
Definition: swscale.h:120
SwsFormat::color
SwsColor color
Definition: format.h:86
lumRangeFromJpeg16_c
static void lumRangeFromJpeg16_c(int16_t *_dst, int width, uint32_t coeff, int64_t offset)
Definition: swscale.c:248
atomic_exchange_explicit
#define atomic_exchange_explicit(object, desired, order)
Definition: stdatomic.h:106
SWS_STRICT
@ SWS_STRICT
Return an error on underspecified conversions.
Definition: swscale.h:115
ff_dither_8x8_128
const uint8_t ff_dither_8x8_128[9][8]
Definition: swscale.c:42
AV_CPU_FLAG_MMXEXT
#define AV_CPU_FLAG_MMXEXT
SSE integer functions or AMD MMX ext.
Definition: cpu.h:33
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
ff_swscale
int ff_swscale(SwsInternal *c, const uint8_t *const src[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:263
SwsFormat::csp
enum AVColorSpace csp
Definition: format.h:83
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
av_clip_uint16
#define av_clip_uint16
Definition: common.h:112
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
desc
const char * desc
Definition: libsvtav1.c:83
scale_gamma
static int scale_gamma(SwsInternal *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dstSlice[], const int dstStride[], int dstSliceY, int dstSliceH)
Definition: swscale.c:959
SwsGraph::incomplete
bool incomplete
Definition: graph.h:125
mem.h
BU_IDX
#define BU_IDX
Definition: swscale_internal.h:472
SwsPlane::sliceY
int sliceY
index of first line
Definition: swscale_internal.h:1108
SwsContext::dst_w
int dst_w
Definition: swscale.h:254
SwsInternal::dst_slice_height
int dst_slice_height
Definition: swscale_internal.h:352
SwsGraph
Filter graph, which represents a 'baked' pixel format conversion.
Definition: graph.h:121
SwsContext::src_format
int src_format
Source pixel format.
Definition: swscale.h:255
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
ff_hyscale_fast_c
void ff_hyscale_fast_c(SwsInternal *c, int16_t *dst, int dstWidth, const uint8_t *src, int srcW, int xInc)
Definition: hscale_fast_bilinear.c:23
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
ff_sws_init_output_funcs
av_cold void ff_sws_init_output_funcs(SwsInternal *c, yuv2planar1_fn *yuv2plane1, yuv2planarX_fn *yuv2planeX, yuv2interleavedX_fn *yuv2nv12cX, yuv2packed1_fn *yuv2packed1, yuv2packed2_fn *yuv2packed2, yuv2packedX_fn *yuv2packedX, yuv2anyX_fn *yuv2anyX)
Definition: output.c:3291
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
ff_sws_init_swscale_aarch64
av_cold void ff_sws_init_swscale_aarch64(SwsInternal *c)
Definition: swscale.c:350
ff_frame_pool_video_reinit
int ff_frame_pool_video_reinit(FFFramePool *pool, int width, int height, enum AVPixelFormat format, int align)
Recreate the video frame pool if its current configuration differs from the provided configuration.
Definition: framepool.c:223
SwsInternal::dst_slice_start
int dst_slice_start
Definition: swscale_internal.h:351
int32_t
int32_t
Definition: audioconvert.c:56
hwcontext.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:480
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sws_internal
static SwsInternal * sws_internal(const SwsContext *sws)
Definition: swscale_internal.h:79
sws_scale_frame
int sws_scale_frame(SwsContext *sws, AVFrame *dst, const AVFrame *src)
Scale source data from src and write the output to dst.
Definition: swscale.c:1370
h
h
Definition: vp9dsp_template.c:2070
stride
#define stride
Definition: h264pred_template.c:536
solve_range_convert
static void solve_range_convert(uint16_t src_min, uint16_t src_max, uint16_t dst_min, uint16_t dst_max, int src_bits, int src_shift, int mult_shift, uint32_t *coeff, int64_t *offset)
Definition: swscale.c:577
isPlanar
static av_always_inline int isPlanar(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:914
GU_IDX
#define GU_IDX
Definition: swscale_internal.h:471
width
#define width
Definition: dsp.h:89
SwsContext
Main external API structure.
Definition: swscale.h:206
ff_sws_graph_run
New swscale design to change SwsGraph is what coordinates multiple passes These can include cascaded scaling error diffusion and so on Or we could have separate passes for the vertical and horizontal scaling In between each SwsPass lies a fully allocated image buffer Graph passes may have different levels of e g we can have a single threaded error diffusion pass following a multi threaded scaling pass SwsGraph is internally recreated whenever the image dimensions or settings change in any way splits interlaced images into separate and calls ff_sws_graph_run() on each. From the point of view of SwsGraph itself
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3823
src
#define src
Definition: vp8dsp.c:248
swscale.h
SwsColor::prim
enum AVColorPrimaries prim
Definition: format.h:61
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376
AV_RB16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_RB16
Definition: bytestream.h:98
isALPHA
static av_always_inline int isALPHA(enum AVPixelFormat pix_fmt)
Definition: swscale_internal.h:896
RGB2YUV_SHIFT
#define RGB2YUV_SHIFT
Definition: swscale_internal.h:476