FFmpeg
vf_colorspace.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * @file
23  * Convert between colorspaces.
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/pixfmt.h"
30 
31 #include "avfilter.h"
32 #include "colorspacedsp.h"
33 #include "formats.h"
34 #include "internal.h"
35 #include "video.h"
36 #include "colorspace.h"
37 
/**
 * Dithering mode applied when the output bit depth is lower than needed to
 * represent the conversion result; selected via the "dither" option below.
 * NOTE(review): the enumerator lines were missing from this extract and have
 * been restored from their uses ("dither" option table and convert()).
 */
enum DitherMode {
    DITHER_NONE,  ///< no dithering
    DITHER_FSB,   ///< error-diffusion dithering (rgb2yuv_fsb path)
    DITHER_NB,    ///< number of dithering modes
};
43 
/**
 * Combined color-property presets used by the "all"/"iall" options; each
 * preset selects a matching primaries/transfer/matrix triple through the
 * default_prm/default_trc/default_csp tables.
 * NOTE(review): the enumerator lines were missing from this extract and have
 * been restored from the option table below — confirm the exact order.
 */
enum Colorspace {
    CS_UNSPECIFIED,
    CS_BT470M,
    CS_BT470BG,
    CS_BT601_6_525,
    CS_BT601_6_625,
    CS_BT709,
    CS_SMPTE170M,
    CS_SMPTE240M,
    CS_BT2020,
    CS_NB,  ///< number of presets (tables are sized CS_NB + 1 as a guard)
};
56 
/**
 * White-point identifiers; index into whitepoint_coefficients[].
 * NOTE(review): the enumerator lines were missing from this extract and have
 * been restored from the whitepoint_coefficients[] designated initializers.
 */
enum Whitepoint {
    WP_D65,
    WP_C,
    WP_DCI,
    WP_E,
    WP_NB,
};
64 
71 };
72 
84 };
85 
// Default output color primaries for each Colorspace preset; indexed with
// FFMIN(user_all, CS_NB), hence the extra guard entry.
// NOTE(review): the initializer entries are not visible in this extract.
static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
};
98 
// Default output YUV matrix (AVColorSpace) for each Colorspace preset;
// indexed with FFMIN(user_all, CS_NB), hence the extra guard entry.
// NOTE(review): the initializer entries are not visible in this extract.
static const enum AVColorSpace default_csp[CS_NB + 1] = {
};
111 
115 };
116 
118  double alpha, beta, gamma, delta;
119 };
120 
typedef struct ColorSpaceContext {
    const AVClass *class;  // AVOptions class; must be the first member

    // Property naming convention:
    //   user_*  — output properties requested via AVOptions
    //   user_i* — input-side overrides requested via AVOptions
    //   in_/out_ — properties the conversion tables are currently built for
    enum Colorspace user_all, user_iall;
    enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
    enum AVColorRange in_rng, out_rng, user_rng, user_irng;
    enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
    enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
    enum AVPixelFormat in_format, user_format;
    enum WhitepointAdaptation wp_adapt;  // cone-response model for white-point conversion

    int16_t *rgb[3];        // intermediate RGB planes in pseudo-15-bit+sign form
    ptrdiff_t rgb_stride;   // stride of rgb[] planes, in samples
    unsigned rgb_sz;        // currently allocated size of each rgb[] plane, bytes
    int *dither_scratch[3][2], *dither_scratch_base[3][2];  // error rows for FSB dithering

    const struct ColorPrimaries *in_primaries, *out_primaries;
    DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];  // linear RGB -> linear RGB (14-bit fixed point)

    const struct TransferCharacteristics *in_txchr, *out_txchr;
    int16_t *lin_lut, *delin_lut;  // gamma LUTs, one allocation; see fill_gamma_table()

    const struct LumaCoefficients *in_lumacoef, *out_lumacoef;
    int yuv2yuv_passthrough, yuv2yuv_fastmode;
    DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
    DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
    // double-precision originals of the fixed-point tables above, used when
    // composing the direct yuv2yuv matrix in create_filtergraph()
    double yuv2rgb_dbl_coeffs[3][3], rgb2yuv_dbl_coeffs[3][3];
    int in_y_rng, in_uv_rng, out_y_rng, out_uv_rng;  // luma/chroma sample ranges
    // NOTE(review): further members referenced elsewhere in this file (dsp,
    // yuv2rgb/rgb2yuv/rgb2yuv_fsb/yuv2yuv function pointers, dither,
    // fast_mode, rgb2rgb_passthrough, lrgb2lrgb_passthrough, did_warn_range)
    // have their declaration lines missing from this extract.
161 
164 
165 // FIXME deal with odd width/heights
166 // FIXME faster linearize/delinearize implementation (integer pow)
167 // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
168 // FIXME test that the values in (de)lin_lut don't exceed their container storage
169 // type size (only useful if we keep the LUT and don't move to fast integer pow)
170 // FIXME dithering if bitdepth goes down?
171 // FIXME bitexact for fate integration?
172 
173 // FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
174 // find any actual tables that document their real values...
175 // See http://www.13thmonkey.org/~boris/gammacorrection/ first graph why it matters
177  [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
178  [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
179  [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
180  [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
181  [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
182  [AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
183  [AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
184  [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
185  [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
186 };
187 
188 static const struct TransferCharacteristics *
190 {
191  const struct TransferCharacteristics *coeffs;
192 
193  if (trc >= AVCOL_TRC_NB)
194  return NULL;
195  coeffs = &transfer_characteristics[trc];
196  if (!coeffs->alpha)
197  return NULL;
198 
199  return coeffs;
200 }
201 
203  [WP_D65] = { 0.3127, 0.3290 },
204  [WP_C] = { 0.3100, 0.3160 },
205  [WP_DCI] = { 0.3140, 0.3510 },
206  [WP_E] = { 1/3.0f, 1/3.0f },
207 };
208 
210  [AVCOL_PRI_BT709] = { WP_D65, { 0.640, 0.330, 0.300, 0.600, 0.150, 0.060 } },
211  [AVCOL_PRI_BT470M] = { WP_C, { 0.670, 0.330, 0.210, 0.710, 0.140, 0.080 } },
212  [AVCOL_PRI_BT470BG] = { WP_D65, { 0.640, 0.330, 0.290, 0.600, 0.150, 0.060 } },
213  [AVCOL_PRI_SMPTE170M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
214  [AVCOL_PRI_SMPTE240M] = { WP_D65, { 0.630, 0.340, 0.310, 0.595, 0.155, 0.070 } },
215  [AVCOL_PRI_SMPTE428] = { WP_E, { 0.735, 0.265, 0.274, 0.718, 0.167, 0.009 } },
216  [AVCOL_PRI_SMPTE431] = { WP_DCI, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
217  [AVCOL_PRI_SMPTE432] = { WP_D65, { 0.680, 0.320, 0.265, 0.690, 0.150, 0.060 } },
218  [AVCOL_PRI_FILM] = { WP_C, { 0.681, 0.319, 0.243, 0.692, 0.145, 0.049 } },
219  [AVCOL_PRI_BT2020] = { WP_D65, { 0.708, 0.292, 0.170, 0.797, 0.131, 0.046 } },
220  [AVCOL_PRI_JEDEC_P22] = { WP_D65, { 0.630, 0.340, 0.295, 0.605, 0.155, 0.077 } },
221 };
222 
224 {
225  const struct ColorPrimaries *p;
226 
227  if (prm >= AVCOL_PRI_NB)
228  return NULL;
229  p = &color_primaries[prm];
230  if (!p->coeff.xr)
231  return NULL;
232 
233  return p;
234 }
235 
237 {
238  int n;
239  double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
240  double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
241  double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
242  double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
243  double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
244 
245  s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
246  if (!s->lin_lut)
247  return AVERROR(ENOMEM);
248  s->delin_lut = &s->lin_lut[32768];
249  for (n = 0; n < 32768; n++) {
250  double v = (n - 2048.0) / 28672.0, d, l;
251 
252  // delinearize
253  if (v <= -out_beta) {
254  d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
255  } else if (v < out_beta) {
256  d = out_delta * v;
257  } else {
258  d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
259  }
260  s->delin_lut[n] = av_clip_int16(lrint(d * 28672.0));
261 
262  // linearize
263  if (v <= -in_beta * in_delta) {
264  l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
265  } else if (v < in_beta * in_delta) {
266  l = v * in_idelta;
267  } else {
268  l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
269  }
270  s->lin_lut[n] = av_clip_int16(lrint(l * 28672.0));
271  }
272 
273  return 0;
274 }
275 
276 /*
277  * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
278  * This function uses the Bradford mechanism.
279  */
280 static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
281  enum Whitepoint src, enum Whitepoint dst)
282 {
283  static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
284  [WP_ADAPT_BRADFORD] = {
285  { 0.8951, 0.2664, -0.1614 },
286  { -0.7502, 1.7135, 0.0367 },
287  { 0.0389, -0.0685, 1.0296 },
288  }, [WP_ADAPT_VON_KRIES] = {
289  { 0.40024, 0.70760, -0.08081 },
290  { -0.22630, 1.16532, 0.04570 },
291  { 0.00000, 0.00000, 0.91822 },
292  },
293  };
294  const double (*ma)[3] = ma_tbl[wp_adapt];
295  const struct WhitepointCoefficients *wp_src = &whitepoint_coefficients[src];
296  double zw_src = 1.0 - wp_src->xw - wp_src->yw;
297  const struct WhitepointCoefficients *wp_dst = &whitepoint_coefficients[dst];
298  double zw_dst = 1.0 - wp_dst->xw - wp_dst->yw;
299  double mai[3][3], fac[3][3], tmp[3][3];
300  double rs, gs, bs, rd, gd, bd;
301 
302  ff_matrix_invert_3x3(ma, mai);
303  rs = ma[0][0] * wp_src->xw + ma[0][1] * wp_src->yw + ma[0][2] * zw_src;
304  gs = ma[1][0] * wp_src->xw + ma[1][1] * wp_src->yw + ma[1][2] * zw_src;
305  bs = ma[2][0] * wp_src->xw + ma[2][1] * wp_src->yw + ma[2][2] * zw_src;
306  rd = ma[0][0] * wp_dst->xw + ma[0][1] * wp_dst->yw + ma[0][2] * zw_dst;
307  gd = ma[1][0] * wp_dst->xw + ma[1][1] * wp_dst->yw + ma[1][2] * zw_dst;
308  bd = ma[2][0] * wp_dst->xw + ma[2][1] * wp_dst->yw + ma[2][2] * zw_dst;
309  fac[0][0] = rd / rs;
310  fac[1][1] = gd / gs;
311  fac[2][2] = bd / bs;
312  fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
313  ff_matrix_mul_3x3(tmp, ma, fac);
314  ff_matrix_mul_3x3(out, tmp, mai);
315 }
316 
/**
 * Remap every sample of the three planes in buf through lut, in place.
 * Each sample is biased by 2048 and clipped to 15 bits to form the LUT
 * index, matching the pseudo-15-bit signed representation used for the
 * intermediate RGB planes.
 */
static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
                      int w, int h, const int16_t *lut)
{
    for (int plane = 0; plane < 3; plane++) {
        int16_t *row = buf[plane];

        for (int line = 0; line < h; line++, row += stride) {
            for (int col = 0; col < w; col++)
                row[col] = lut[av_clip_uintp2(2048 + row[col], 15)];
        }
    }
}
333 
/**
 * Per-frame job context handed to the slice workers in convert().
 */
typedef struct ThreadData {
    AVFrame *in, *out;                          // source and destination frames
    ptrdiff_t in_linesize[3], out_linesize[3];  // per-plane strides, in bytes
    int in_ss_h, out_ss_h;                      // log2 vertical chroma subsampling shift
} ThreadData;
339 
340 static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
341 {
342  const ThreadData *td = data;
343  ColorSpaceContext *s = ctx->priv;
344  uint8_t *in_data[3], *out_data[3];
345  int16_t *rgb[3];
346  int h_in = (td->in->height + 1) >> 1;
347  int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
348  int w = td->in->width, h = h2 - h1;
349 
350  in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
351  in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
352  in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
353  out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
354  out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
355  out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
356  rgb[0] = s->rgb[0] + s->rgb_stride * h1;
357  rgb[1] = s->rgb[1] + s->rgb_stride * h1;
358  rgb[2] = s->rgb[2] + s->rgb_stride * h1;
359 
360  // FIXME for simd, also make sure we do pictures with negative stride
361  // top-down so we don't overwrite lines with padding of data before it
362  // in the same buffer (same as swscale)
363 
364  if (s->yuv2yuv_fastmode) {
365  // FIXME possibly use a fast mode in case only the y range changes?
366  // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
367  // are non-zero
368  s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
369  s->yuv2yuv_coeffs, s->yuv_offset);
370  } else {
371  // FIXME maybe (for caching efficiency) do pipeline per-line instead of
372  // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
373  // 2 lines, for yuv420.)
374  /*
375  * General design:
376  * - yuv2rgb converts from whatever range the input was ([16-235/240] or
377  * [0,255] or the 10/12bpp equivalents thereof) to an integer version
378  * of RGB in psuedo-restricted 15+sign bits. That means that the float
379  * range [0.0,1.0] is in [0,28762], and the remainder of the int16_t
380  * range is used for overflow/underflow outside the representable
381  * range of this RGB type. rgb2yuv is the exact opposite.
382  * - gamma correction is done using a LUT since that appears to work
383  * fairly fast.
384  * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
385  * (or rgb2yuv conversion) uses nearest-neighbour sampling to read
386  * read chroma pixels at luma resolution. If you want some more fancy
387  * filter, you can use swscale to convert to yuv444p.
388  * - all coefficients are 14bit (so in the [-2.0,2.0] range).
389  */
390  s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
391  s->yuv2rgb_coeffs, s->yuv_offset[0]);
392  if (!s->rgb2rgb_passthrough) {
393  apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
394  if (!s->lrgb2lrgb_passthrough)
395  s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
396  apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
397  }
398  if (s->dither == DITHER_FSB) {
399  s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
401  } else {
402  s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
403  s->rgb2yuv_coeffs, s->yuv_offset[1]);
404  }
405  }
406 
407  return 0;
408 }
409 
410 static int get_range_off(AVFilterContext *ctx, int *off,
411  int *y_rng, int *uv_rng,
412  enum AVColorRange rng, int depth)
413 {
414  switch (rng) {
416  ColorSpaceContext *s = ctx->priv;
417 
418  if (!s->did_warn_range) {
419  av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
420  s->did_warn_range = 1;
421  }
422  }
423  // fall-through
424  case AVCOL_RANGE_MPEG:
425  *off = 16 << (depth - 8);
426  *y_rng = 219 << (depth - 8);
427  *uv_rng = 224 << (depth - 8);
428  break;
429  case AVCOL_RANGE_JPEG:
430  *off = 0;
431  *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
432  break;
433  default:
434  return AVERROR(EINVAL);
435  }
436 
437  return 0;
438 }
439 
441  const AVFrame *in, const AVFrame *out)
442 {
443  ColorSpaceContext *s = ctx->priv;
444  const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
445  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
446  int emms = 0, m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
447 
448 #define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
449 #define supported_subsampling(lcw, lch) \
450  (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
451 #define supported_format(d) \
452  ((d) != NULL && (d)->nb_components == 3 && \
453  !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
454  supported_depth((d)->comp[0].depth) && \
455  supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
456 
457  if (!supported_format(in_desc)) {
458  av_log(ctx, AV_LOG_ERROR,
459  "Unsupported input format %d (%s) or bitdepth (%d)\n",
461  in_desc ? in_desc->comp[0].depth : -1);
462  return AVERROR(EINVAL);
463  }
464  if (!supported_format(out_desc)) {
465  av_log(ctx, AV_LOG_ERROR,
466  "Unsupported output format %d (%s) or bitdepth (%d)\n",
467  out->format, av_get_pix_fmt_name(out->format),
468  out_desc ? out_desc->comp[0].depth : -1);
469  return AVERROR(EINVAL);
470  }
471 
472  if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
473  if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
474  if (in->color_trc != s->in_trc) s->in_txchr = NULL;
475  if (out->color_trc != s->out_trc) s->out_txchr = NULL;
476  if (in->colorspace != s->in_csp ||
477  in->color_range != s->in_rng) s->in_lumacoef = NULL;
478  if (out->colorspace != s->out_csp ||
479  out->color_range != s->out_rng) s->out_lumacoef = NULL;
480 
481  if (!s->out_primaries || !s->in_primaries) {
482  s->in_prm = in->color_primaries;
483  if (s->user_iall != CS_UNSPECIFIED)
484  s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
486  s->in_prm = s->user_iprm;
487  s->in_primaries = get_color_primaries(s->in_prm);
488  if (!s->in_primaries) {
489  av_log(ctx, AV_LOG_ERROR,
490  "Unsupported input primaries %d (%s)\n",
491  s->in_prm, av_color_primaries_name(s->in_prm));
492  return AVERROR(EINVAL);
493  }
494  s->out_prm = out->color_primaries;
495  s->out_primaries = get_color_primaries(s->out_prm);
496  if (!s->out_primaries) {
497  if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
498  if (s->user_all == CS_UNSPECIFIED) {
499  av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
500  } else {
501  av_log(ctx, AV_LOG_ERROR,
502  "Unsupported output color property %d\n", s->user_all);
503  }
504  } else {
505  av_log(ctx, AV_LOG_ERROR,
506  "Unsupported output primaries %d (%s)\n",
507  s->out_prm, av_color_primaries_name(s->out_prm));
508  }
509  return AVERROR(EINVAL);
510  }
512  sizeof(*s->in_primaries));
513  if (!s->lrgb2lrgb_passthrough) {
514  double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
515  const struct WhitepointCoefficients *wp_out, *wp_in;
516 
517  wp_out = &whitepoint_coefficients[s->out_primaries->wp];
518  wp_in = &whitepoint_coefficients[s->in_primaries->wp];
519  ff_fill_rgb2xyz_table(&s->out_primaries->coeff, wp_out, rgb2xyz);
520  ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
521  ff_fill_rgb2xyz_table(&s->in_primaries->coeff, wp_in, rgb2xyz);
522  if (s->out_primaries->wp != s->in_primaries->wp &&
523  s->wp_adapt != WP_ADAPT_IDENTITY) {
524  double wpconv[3][3], tmp[3][3];
525 
527  s->out_primaries->wp);
528  ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
529  ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
530  } else {
531  ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
532  }
533  for (m = 0; m < 3; m++)
534  for (n = 0; n < 3; n++) {
535  s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
536  for (o = 1; o < 8; o++)
537  s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
538  }
539 
540  emms = 1;
541  }
542  }
543 
544  if (!s->in_txchr) {
545  av_freep(&s->lin_lut);
546  s->in_trc = in->color_trc;
547  if (s->user_iall != CS_UNSPECIFIED)
548  s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
550  s->in_trc = s->user_itrc;
551  s->in_txchr = get_transfer_characteristics(s->in_trc);
552  if (!s->in_txchr) {
553  av_log(ctx, AV_LOG_ERROR,
554  "Unsupported input transfer characteristics %d (%s)\n",
555  s->in_trc, av_color_transfer_name(s->in_trc));
556  return AVERROR(EINVAL);
557  }
558  }
559 
560  if (!s->out_txchr) {
561  av_freep(&s->lin_lut);
562  s->out_trc = out->color_trc;
563  s->out_txchr = get_transfer_characteristics(s->out_trc);
564  if (!s->out_txchr) {
565  if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
566  if (s->user_all == CS_UNSPECIFIED) {
567  av_log(ctx, AV_LOG_ERROR,
568  "Please specify output transfer characteristics\n");
569  } else {
570  av_log(ctx, AV_LOG_ERROR,
571  "Unsupported output color property %d\n", s->user_all);
572  }
573  } else {
574  av_log(ctx, AV_LOG_ERROR,
575  "Unsupported output transfer characteristics %d (%s)\n",
576  s->out_trc, av_color_transfer_name(s->out_trc));
577  }
578  return AVERROR(EINVAL);
579  }
580  }
581 
583  !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
584  if (!s->rgb2rgb_passthrough && !s->lin_lut) {
585  res = fill_gamma_table(s);
586  if (res < 0)
587  return res;
588  emms = 1;
589  }
590 
591  if (!s->in_lumacoef) {
592  s->in_csp = in->colorspace;
593  if (s->user_iall != CS_UNSPECIFIED)
594  s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
596  s->in_csp = s->user_icsp;
597  s->in_rng = in->color_range;
599  s->in_rng = s->user_irng;
600  s->in_lumacoef = ff_get_luma_coefficients(s->in_csp);
601  if (!s->in_lumacoef) {
602  av_log(ctx, AV_LOG_ERROR,
603  "Unsupported input colorspace %d (%s)\n",
604  s->in_csp, av_color_space_name(s->in_csp));
605  return AVERROR(EINVAL);
606  }
607  redo_yuv2rgb = 1;
608  }
609 
610  if (!s->out_lumacoef) {
611  s->out_csp = out->colorspace;
612  s->out_rng = out->color_range;
613  s->out_lumacoef = ff_get_luma_coefficients(s->out_csp);
614  if (!s->out_lumacoef) {
615  if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
616  if (s->user_all == CS_UNSPECIFIED) {
617  av_log(ctx, AV_LOG_ERROR,
618  "Please specify output transfer characteristics\n");
619  } else {
620  av_log(ctx, AV_LOG_ERROR,
621  "Unsupported output color property %d\n", s->user_all);
622  }
623  } else {
624  av_log(ctx, AV_LOG_ERROR,
625  "Unsupported output transfer characteristics %d (%s)\n",
626  s->out_csp, av_color_space_name(s->out_csp));
627  }
628  return AVERROR(EINVAL);
629  }
630  redo_rgb2yuv = 1;
631  }
632 
633  fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
634  in_desc->log2_chroma_w == out_desc->log2_chroma_w;
635  s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
636  s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
637  !memcmp(s->in_lumacoef, s->out_lumacoef,
638  sizeof(*s->in_lumacoef)) &&
639  in_desc->comp[0].depth == out_desc->comp[0].depth;
640  if (!s->yuv2yuv_passthrough) {
641  if (redo_yuv2rgb) {
642  double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
643  int off, bits, in_rng;
644 
645  res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
646  s->in_rng, in_desc->comp[0].depth);
647  if (res < 0) {
648  av_log(ctx, AV_LOG_ERROR,
649  "Unsupported input color range %d (%s)\n",
650  s->in_rng, av_color_range_name(s->in_rng));
651  return res;
652  }
653  for (n = 0; n < 8; n++)
654  s->yuv_offset[0][n] = off;
655  ff_fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
656  ff_matrix_invert_3x3(rgb2yuv, yuv2rgb);
657  bits = 1 << (in_desc->comp[0].depth - 1);
658  for (n = 0; n < 3; n++) {
659  for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
660  s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
661  for (o = 1; o < 8; o++)
662  s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
663  }
664  }
665  av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
666  av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
667  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
668  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
669  s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
670  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
671  emms = 1;
672  }
673 
674  if (redo_rgb2yuv) {
675  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
676  int off, out_rng, bits;
677 
678  res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
679  s->out_rng, out_desc->comp[0].depth);
680  if (res < 0) {
681  av_log(ctx, AV_LOG_ERROR,
682  "Unsupported output color range %d (%s)\n",
683  s->out_rng, av_color_range_name(s->out_rng));
684  return res;
685  }
686  for (n = 0; n < 8; n++)
687  s->yuv_offset[1][n] = off;
689  bits = 1 << (29 - out_desc->comp[0].depth);
690  for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
691  for (m = 0; m < 3; m++) {
692  s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
693  for (o = 1; o < 8; o++)
694  s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
695  }
696  }
697  av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
698  s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
699  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
700  s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
701  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
702  emms = 1;
703  }
704 
705  if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
706  int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
707  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
708  double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
709  double yuv2yuv[3][3];
710  int in_rng, out_rng;
711 
712  ff_matrix_mul_3x3(yuv2yuv, yuv2rgb, rgb2yuv);
713  for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
714  for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
715  s->yuv2yuv_coeffs[m][n][0] =
716  lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
717  (in_rng * (1 << odepth)));
718  for (o = 1; o < 8; o++)
719  s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
720  }
721  }
722  av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
723  av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
724  s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
725  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
726  }
727  }
728 
729  if (emms)
730  emms_c();
731 
732  return 0;
733 }
734 
736 {
737  ColorSpaceContext *s = ctx->priv;
738 
740 
741  return 0;
742 }
743 
745 {
746  ColorSpaceContext *s = ctx->priv;
747 
748  av_freep(&s->rgb[0]);
749  av_freep(&s->rgb[1]);
750  av_freep(&s->rgb[2]);
751  s->rgb_sz = 0;
752  av_freep(&s->dither_scratch_base[0][0]);
753  av_freep(&s->dither_scratch_base[0][1]);
754  av_freep(&s->dither_scratch_base[1][0]);
755  av_freep(&s->dither_scratch_base[1][1]);
756  av_freep(&s->dither_scratch_base[2][0]);
757  av_freep(&s->dither_scratch_base[2][1]);
758 
759  av_freep(&s->lin_lut);
760 }
761 
763 {
764  AVFilterContext *ctx = link->dst;
765  AVFilterLink *outlink = ctx->outputs[0];
766  ColorSpaceContext *s = ctx->priv;
767  // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
768  // input one if it is writable *OR* the actual literal values of in_*
769  // and out_* are identical (not just their respective properties)
770  AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
771  int res;
772  ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
773  unsigned rgb_sz = rgb_stride * in->height;
774  ThreadData td;
775 
776  if (!out) {
777  av_frame_free(&in);
778  return AVERROR(ENOMEM);
779  }
780  res = av_frame_copy_props(out, in);
781  if (res < 0) {
782  av_frame_free(&in);
783  av_frame_free(&out);
784  return res;
785  }
786 
787  out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
788  default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
789  if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
791 
792  out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
793  if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
795  } else {
796  out->color_trc = s->user_trc;
797  }
798  out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
799  default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
800  out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
801  in->color_range : s->user_rng;
802  if (rgb_sz != s->rgb_sz) {
804  int uvw = in->width >> desc->log2_chroma_w;
805 
806  av_freep(&s->rgb[0]);
807  av_freep(&s->rgb[1]);
808  av_freep(&s->rgb[2]);
809  s->rgb_sz = 0;
810  av_freep(&s->dither_scratch_base[0][0]);
811  av_freep(&s->dither_scratch_base[0][1]);
812  av_freep(&s->dither_scratch_base[1][0]);
813  av_freep(&s->dither_scratch_base[1][1]);
814  av_freep(&s->dither_scratch_base[2][0]);
815  av_freep(&s->dither_scratch_base[2][1]);
816 
817  s->rgb[0] = av_malloc(rgb_sz);
818  s->rgb[1] = av_malloc(rgb_sz);
819  s->rgb[2] = av_malloc(rgb_sz);
820  s->dither_scratch_base[0][0] =
821  av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
822  s->dither_scratch_base[0][1] =
823  av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
824  s->dither_scratch_base[1][0] =
825  av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
826  s->dither_scratch_base[1][1] =
827  av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
828  s->dither_scratch_base[2][0] =
829  av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
830  s->dither_scratch_base[2][1] =
831  av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
832  s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
833  s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
834  s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
835  s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
836  s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
837  s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
838  if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
839  !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
840  !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
841  !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
842  uninit(ctx);
843  av_frame_free(&in);
844  av_frame_free(&out);
845  return AVERROR(ENOMEM);
846  }
847  s->rgb_sz = rgb_sz;
848  }
849  res = create_filtergraph(ctx, in, out);
850  if (res < 0) {
851  av_frame_free(&in);
852  av_frame_free(&out);
853  return res;
854  }
855  s->rgb_stride = rgb_stride / sizeof(int16_t);
856  td.in = in;
857  td.out = out;
858  td.in_linesize[0] = in->linesize[0];
859  td.in_linesize[1] = in->linesize[1];
860  td.in_linesize[2] = in->linesize[2];
861  td.out_linesize[0] = out->linesize[0];
862  td.out_linesize[1] = out->linesize[1];
863  td.out_linesize[2] = out->linesize[2];
866  if (s->yuv2yuv_passthrough) {
867  res = av_frame_copy(out, in);
868  if (res < 0) {
869  av_frame_free(&in);
870  av_frame_free(&out);
871  return res;
872  }
873  } else {
874  ctx->internal->execute(ctx, convert, &td, NULL,
875  FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
876  }
877  av_frame_free(&in);
878 
879  return ff_filter_frame(outlink, out);
880 }
881 
883 {
884  static const enum AVPixelFormat pix_fmts[] = {
890  };
891  int res;
892  ColorSpaceContext *s = ctx->priv;
894 
895  if (!formats)
896  return AVERROR(ENOMEM);
897  if (s->user_format == AV_PIX_FMT_NONE)
898  return ff_set_common_formats(ctx, formats);
899  res = ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
900  if (res < 0)
901  return res;
902  formats = NULL;
903  res = ff_add_format(&formats, s->user_format);
904  if (res < 0)
905  return res;
906 
907  return ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
908 }
909 
910 static int config_props(AVFilterLink *outlink)
911 {
912  AVFilterContext *ctx = outlink->dst;
913  AVFilterLink *inlink = outlink->src->inputs[0];
914 
915  if (inlink->w % 2 || inlink->h % 2) {
916  av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
917  inlink->w, inlink->h);
918  return AVERROR_PATCHWELCOME;
919  }
920 
921  outlink->w = inlink->w;
922  outlink->h = inlink->h;
923  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
924  outlink->time_base = inlink->time_base;
925 
926  return 0;
927 }
928 
929 #define OFFSET(x) offsetof(ColorSpaceContext, x)
930 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
931 #define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, z }
932 
933 static const AVOption colorspace_options[] = {
934  { "all", "Set all color properties together",
935  OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
936  CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
937  ENUM("bt470m", CS_BT470M, "all"),
938  ENUM("bt470bg", CS_BT470BG, "all"),
939  ENUM("bt601-6-525", CS_BT601_6_525, "all"),
940  ENUM("bt601-6-625", CS_BT601_6_625, "all"),
941  ENUM("bt709", CS_BT709, "all"),
942  ENUM("smpte170m", CS_SMPTE170M, "all"),
943  ENUM("smpte240m", CS_SMPTE240M, "all"),
944  ENUM("bt2020", CS_BT2020, "all"),
945 
946  { "space", "Output colorspace",
947  OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
948  AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, "csp"},
949  ENUM("bt709", AVCOL_SPC_BT709, "csp"),
950  ENUM("fcc", AVCOL_SPC_FCC, "csp"),
951  ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
952  ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
953  ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
954  ENUM("ycgco", AVCOL_SPC_YCGCO, "csp"),
955  ENUM("gbr", AVCOL_SPC_RGB, "csp"),
956  ENUM("bt2020nc", AVCOL_SPC_BT2020_NCL, "csp"),
957  ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
958 
959  { "range", "Output color range",
960  OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
962  ENUM("tv", AVCOL_RANGE_MPEG, "rng"),
963  ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
964  ENUM("pc", AVCOL_RANGE_JPEG, "rng"),
965  ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
966 
967  { "primaries", "Output color primaries",
968  OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
969  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
970  ENUM("bt709", AVCOL_PRI_BT709, "prm"),
971  ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
972  ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
973  ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
974  ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
975  ENUM("smpte428", AVCOL_PRI_SMPTE428, "prm"),
976  ENUM("film", AVCOL_PRI_FILM, "prm"),
977  ENUM("smpte431", AVCOL_PRI_SMPTE431, "prm"),
978  ENUM("smpte432", AVCOL_PRI_SMPTE432, "prm"),
979  ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
980  ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
981  ENUM("ebu3213", AVCOL_PRI_EBU3213, "prm"),
982 
983  { "trc", "Output transfer characteristics",
984  OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
985  AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
986  ENUM("bt709", AVCOL_TRC_BT709, "trc"),
987  ENUM("bt470m", AVCOL_TRC_GAMMA22, "trc"),
988  ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
989  ENUM("bt470bg", AVCOL_TRC_GAMMA28, "trc"),
990  ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
991  ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
992  ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
993  ENUM("srgb", AVCOL_TRC_IEC61966_2_1, "trc"),
994  ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
995  ENUM("xvycc", AVCOL_TRC_IEC61966_2_4, "trc"),
996  ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
997  ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
998  ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
999 
1000  { "format", "Output pixel format",
1001  OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
1003  ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
1004  ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
1005  ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
1006  ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
1007  ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
1008  ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
1009  ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
1010  ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
1011  ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
1012 
1013  { "fast", "Ignore primary chromaticity and gamma correction",
1014  OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
1015  0, 1, FLAGS },
1016 
1017  { "dither", "Dithering mode",
1018  OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
1019  DITHER_NONE, DITHER_NB - 1, FLAGS, "dither" },
1020  ENUM("none", DITHER_NONE, "dither"),
1021  ENUM("fsb", DITHER_FSB, "dither"),
1022 
1023  { "wpadapt", "Whitepoint adaptation method",
1024  OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
1025  WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, "wpadapt" },
1026  ENUM("bradford", WP_ADAPT_BRADFORD, "wpadapt"),
1027  ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
1028  ENUM("identity", WP_ADAPT_IDENTITY, "wpadapt"),
1029 
1030  { "iall", "Set all input color properties together",
1031  OFFSET(user_iall), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
1032  CS_UNSPECIFIED, CS_NB - 1, FLAGS, "all" },
1033  { "ispace", "Input colorspace",
1034  OFFSET(user_icsp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
1035  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "csp" },
1036  { "irange", "Input color range",
1037  OFFSET(user_irng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
1039  { "iprimaries", "Input color primaries",
1040  OFFSET(user_iprm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
1041  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, "prm" },
1042  { "itrc", "Input transfer characteristics",
1043  OFFSET(user_itrc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
1044  AVCOL_TRC_RESERVED0, AVCOL_TRC_NB - 1, FLAGS, "trc" },
1045 
1046  { NULL }
1047 };
1048 
/* Expands to the colorspace_class AVClass wrapping colorspace_options,
 * referenced below via .priv_class. */
AVFILTER_DEFINE_CLASS(colorspace);
1050 
1051 static const AVFilterPad inputs[] = {
1052  {
1053  .name = "default",
1054  .type = AVMEDIA_TYPE_VIDEO,
1055  .filter_frame = filter_frame,
1056  },
1057  { NULL }
1058 };
1059 
1060 static const AVFilterPad outputs[] = {
1061  {
1062  .name = "default",
1063  .type = AVMEDIA_TYPE_VIDEO,
1064  .config_props = config_props,
1065  },
1066  { NULL }
1067 };
1068 
1070  .name = "colorspace",
1071  .description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
1072  .init = init,
1073  .uninit = uninit,
1074  .query_formats = query_formats,
1075  .priv_size = sizeof(ColorSpaceContext),
1076  .priv_class = &colorspace_class,
1077  .inputs = inputs,
1078  .outputs = outputs,
1080 };
ITU-R BT2020 for 12-bit system.
Definition: pixfmt.h:484
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:499
EBU Tech. 3213-E / JEDEC P22 phosphors.
Definition: pixfmt.h:459
#define NULL
Definition: coverity.c:32
AVFrame * out
Definition: af_adeclick.c:488
IEC 61966-2-4.
Definition: pixfmt.h:480
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
rgb2yuv_fn rgb2yuv
int16_t yuv_offset[2][8]
static enum AVColorPrimaries default_prm[CS_NB+1]
Definition: vf_colorspace.c:86
AVOption.
Definition: opt.h:246
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
double yuv2rgb_dbl_coeffs[3][3]
#define ma
static void fn() rgb2yuv(uint8_t *_yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t s, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8])
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int * dither_scratch[3][2]
Main libavfilter public API header.
enum AVColorTransferCharacteristic in_trc out_trc user_trc user_itrc
const char * desc
Definition: nvenc.c:68
static const AVOption colorspace_options[]
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:503
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:280
SMPTE ST 432-1 (2010) / P3 D65 / Display P3.
Definition: pixfmt.h:458
int16_t yuv2rgb_coeffs[3][3][8]
ptrdiff_t in_linesize[3]
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:504
static int get_range_off(AVFilterContext *ctx, int *off, int *y_rng, int *uv_rng, enum AVColorRange rng, int depth)
SMPTE ST 431-2 (2011) / DCI P3.
Definition: pixfmt.h:457
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:391
static void fn() yuv2yuv(uint8_t *_dst[3], const ptrdiff_t dst_stride[3], uint8_t *_src[3], const ptrdiff_t src_stride[3], int w, int h, const int16_t c[3][3][8], const int16_t yuv_offset[2][8])
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
#define src
Definition: vp8dsp.c:254
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:498
enum DitherMode dither
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
AVColorTransferCharacteristic
Color Transfer Characteristic.
Definition: pixfmt.h:468
functionally identical to above
Definition: pixfmt.h:505
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:2915
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
static const struct ColorPrimaries * get_color_primaries(enum AVColorPrimaries prm)
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:125
const char * name
Pad name.
Definition: internal.h:60
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:346
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1093
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
void(* multiply3x3)(int16_t *data[3], ptrdiff_t stride, int w, int h, const int16_t m[3][3][8])
Definition: colorspacedsp.h:74
uint8_t
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
float delta
AVOptions.
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:497
static av_cold int init(AVFilterContext *ctx)
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:2848
AVFilter ff_vf_colorspace
enum Colorspace user_all user_iall
Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16.
Definition: pixfmt.h:506
enum Whitepoint wp
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:473
static void uninit(AVFilterContext *ctx)
yuv2rgb_fn yuv2rgb
const struct ColorPrimaries * out_primaries
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:112
Colorspace
Definition: vf_colorspace.c:44
ptrdiff_t out_linesize[3]
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
Not part of ABI.
Definition: pixfmt.h:524
AVColorRange
MPEG vs JPEG YUV range.
Definition: pixfmt.h:520
ColorSpaceDSPContext dsp
const struct LumaCoefficients * ff_get_luma_coefficients(enum AVColorSpace csp)
Definition: colorspace.c:128
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:392
const struct ColorPrimaries * in_primaries
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Definition: pixfmt.h:443
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
A filter pad used for either input or output.
Definition: internal.h:54
enum AVColorSpace in_csp out_csp user_csp user_icsp
ptrdiff_t rgb_stride
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:448
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:569
#define td
Definition: regdef.h:70
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
double rgb2yuv_dbl_coeffs[3][3]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
void(* yuv2yuv_fn)(uint8_t *yuv_out[3], const ptrdiff_t yuv_out_stride[3], uint8_t *yuv_in[3], const ptrdiff_t yuv_in_stride[3], int w, int h, const int16_t yuv2yuv_coeffs[3][3][8], const int16_t yuv_offset[2][8])
Definition: colorspacedsp.h:40
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
const struct LumaCoefficients * out_lumacoef
static const uint8_t dither[8][8]
Definition: vf_fspp.c:57
void * priv
private data for use by the filter
Definition: avfilter.h:353
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:539
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
Not part of ABI.
Definition: pixfmt.h:461
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:550
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
Definition: pixfmt.h:445
simple assert() macros that are a bit more flexible than ISO C assert().
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:336
SMPTE ST 428-1 (CIE 1931 XYZ)
Definition: pixfmt.h:455
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:390
uint8_t bits
Definition: vp3data.h:202
static int create_filtergraph(AVFilterContext *ctx, const AVFrame *in, const AVFrame *out)
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:792
static const AVFilterPad inputs[]
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:2867
#define supported_format(d)
static void apply_lut(int16_t *buf[3], ptrdiff_t stride, int w, int h, const int16_t *lut)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
struct PrimaryCoefficients coeff
#define ENUM(x, y, z)
static const struct TransferCharacteristics transfer_characteristics[AVCOL_TRC_NB]
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:802
#define FFMIN(a, b)
Definition: common.h:96
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
colour filters using Illuminant C
Definition: pixfmt.h:453
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:439
uint8_t w
Definition: llviddspenc.c:38
static enum AVColorSpace default_csp[CS_NB+1]
Definition: vf_colorspace.c:99
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:508
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:450
static const struct ColorPrimaries color_primaries[AVCOL_PRI_NB]
AVFormatContext * ctx
Definition: movenc.c:48
static const AVFilterPad outputs[]
int16_t * rgb[3]
int16_t lrgb2lrgb_coeffs[3][3][8]
#define s(width, name)
Definition: cbs_vp9.c:257
int n
Definition: avisynth_c.h:760
#define FLAGS
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:502
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:523
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:288
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
enum AVColorPrimaries in_prm out_prm user_prm user_iprm
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
also ITU-R BT1361
Definition: pixfmt.h:470
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt, enum Whitepoint src, enum Whitepoint dst)
void ff_fill_rgb2yuv_table(const struct LumaCoefficients *coeffs, double rgb2yuv[3][3])
Definition: colorspace.c:141
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
Definition: pixfmt.h:475
static int query_formats(AVFilterContext *ctx)
int16_t yuv2yuv_coeffs[3][3][8]
functionally identical to above
Definition: pixfmt.h:452
Used for passing data between threads.
Definition: dsddec.c:64
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
static const int16_t alpha[]
Definition: ilbcdata.h:55
rgb2yuv_fsb_fn rgb2yuv_fsb
WhitepointAdaptation
Definition: vf_colorspace.c:65
yuv2yuv_fn yuv2yuv[NB_BPP][NB_BPP][NB_SS]
Definition: colorspacedsp.h:70
static int fill_gamma_table(ColorSpaceContext *s)
void * buf
Definition: avisynth_c.h:766
Whitepoint
Definition: vf_colorspace.c:57
rgb2yuv_fn rgb2yuv[NB_BPP][NB_SS]
Definition: colorspacedsp.h:65
int * dither_scratch_base[3][2]
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:387
Describe the class of an AVClass context structure.
Definition: log.h:67
Filter definition.
Definition: avfilter.h:144
yuv2rgb_fn yuv2rgb[NB_BPP][NB_SS]
Definition: colorspacedsp.h:62
Not part of ABI.
Definition: pixfmt.h:490
const struct LumaCoefficients * in_lumacoef
void ff_matrix_invert_3x3(const double in[3][3], double out[3][3])
Definition: colorspace.c:27
const char * name
Filter name.
Definition: avfilter.h:148
void(* rgb2yuv_fsb_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8], int *rnd[3][2])
Definition: colorspacedsp.h:35
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
static enum AVColorTransferCharacteristic default_trc[CS_NB+1]
Definition: vf_colorspace.c:73
AVFilterLink ** outputs
array of pointers to output links
Definition: avfilter.h:350
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFilterInternal * internal
An opaque struct for libavfilter internal use.
Definition: avfilter.h:378
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:388
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:394
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:522
void(* rgb2yuv_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:31
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
IEC 61966-2-1 (sRGB or sYCC)
Definition: pixfmt.h:482
enum WhitepointAdaptation wp_adapt
enum AVColorRange in_rng out_rng user_rng user_irng
void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp)
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2891
also ITU-R BT470BG
Definition: pixfmt.h:474
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
void(* yuv2rgb_fn)(int16_t *rgb[3], ptrdiff_t rgb_stride, uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int w, int h, const int16_t yuv2rgb_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:27
enum AVPixelFormat in_format user_format
static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
avfilter_execute_func * execute
Definition: internal.h:155
static const struct TransferCharacteristics * get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
int16_t rgb2yuv_coeffs[3][3][8]
pixel format definitions
void ff_matrix_mul_3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
Definition: colorspace.c:54
const struct TransferCharacteristics * in_txchr
const struct TransferCharacteristics * out_txchr
A list of supported formats for one end of a filter link.
Definition: formats.h:64
#define lrint
Definition: tablegen.h:53
enum AVColorPrimaries color_primaries
Definition: frame.h:541
An instance of a filter.
Definition: avfilter.h:338
AVFILTER_DEFINE_CLASS(colorspace)
ITU-R BT2020 for 10-bit system.
Definition: pixfmt.h:483
static const struct WhitepointCoefficients whitepoint_coefficients[WP_NB]
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:451
ITU-R BT2020.
Definition: pixfmt.h:454
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
#define av_freep(p)
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:543
AVFrame * in
Definition: af_afftdn.c:1083
formats
Definition: signature.h:48
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2438
#define stride
internal API functions
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int depth
Number of bits in the component.
Definition: pixdesc.h:58
static int filter_frame(AVFilterLink *link, AVFrame *in)
static int config_props(AVFilterLink *outlink)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
rgb2yuv_fsb_fn rgb2yuv_fsb[NB_BPP][NB_SS]
Definition: colorspacedsp.h:67
Not part of ABI.
Definition: pixfmt.h:514
DitherMode
Definition: vf_colorspace.c:38
yuv2yuv_fn yuv2yuv
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
void ff_fill_rgb2xyz_table(const struct PrimaryCoefficients *coeffs, const struct WhitepointCoefficients *wp, double rgb2xyz[3][3])
Definition: colorspace.c:68
#define OFFSET(x)
static uint8_t tmp[11]
Definition: aes_ctr.c:26