FFmpeg
vf_colorspace.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Ronald S. Bultje <rsbultje@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * @file
23  * Convert between colorspaces.
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/csp.h"
28 #include "libavutil/frame.h"
29 #include "libavutil/mem.h"
30 #include "libavutil/mem_internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/pixfmt.h"
34 
35 #include "avfilter.h"
36 #include "colorspacedsp.h"
37 #include "filters.h"
38 #include "formats.h"
39 #include "video.h"
40 #include "colorspace.h"
41 
42 enum DitherMode {
46 };
47 
48 enum Colorspace {
59 };
60 
67 };
68 
73 };
74 
86 };
87 
88 static const enum AVColorPrimaries default_prm[CS_NB + 1] = {
99 };
100 
101 static const enum AVColorSpace default_csp[CS_NB + 1] = {
112 };
113 
115  double alpha, beta, gamma, delta;
116 };
117 
118 typedef struct ColorSpaceContext {
119  const AVClass *class;
120 
122 
123  enum Colorspace user_all, user_iall;
124  enum AVColorSpace in_csp, out_csp, user_csp, user_icsp;
125  enum AVColorRange in_rng, out_rng, user_rng, user_irng;
126  enum AVColorTransferCharacteristic in_trc, out_trc, user_trc, user_itrc;
127  enum AVColorPrimaries in_prm, out_prm, user_prm, user_iprm;
128  enum AVPixelFormat in_format, user_format;
130  /* enum DitherMode */
131  int dither;
132  /* enum WhitepointAdaptation */
133  int wp_adapt;
134  /* enum ClipGamutMode */
136 
137  int16_t *rgb[3];
138  ptrdiff_t rgb_stride;
139  unsigned rgb_sz;
141 
144  DECLARE_ALIGNED(16, int16_t, lrgb2lrgb_coeffs)[3][3][8];
145 
148  int16_t *lin_lut, *delin_lut;
149 
152  DECLARE_ALIGNED(16, int16_t, yuv2rgb_coeffs)[3][3][8];
153  DECLARE_ALIGNED(16, int16_t, rgb2yuv_coeffs)[3][3][8];
154  DECLARE_ALIGNED(16, int16_t, yuv2yuv_coeffs)[3][3][8];
155  DECLARE_ALIGNED(16, int16_t, yuv_offset)[2 /* in, out */][8];
162 
165 
166 // FIXME deal with odd width/heights
167 // FIXME faster linearize/delinearize implementation (integer pow)
168 // FIXME bt2020cl support (linearization between yuv/rgb step instead of between rgb/xyz)
169 // FIXME test that the values in (de)lin_lut don't exceed their container storage
170 // type size (only useful if we keep the LUT and don't move to fast integer pow)
171 // FIXME dithering if bitdepth goes down?
172 // FIXME bitexact for fate integration?
173 
174 // FIXME I'm pretty sure gamma22/28 also have a linear toe slope, but I can't
175 // find any actual tables that document their real values...
176 // See http://www.13thmonkey.org/~boris/gammacorrection/ first graph why it matters
178  [AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
179  [AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
180  [AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
181  [AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
182  [AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
183  [AVCOL_TRC_LINEAR] = { 1.0, 0.0, 1.0, 0.0 },
184  [AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
185  [AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
186  [AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
187  [AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
188 };
189 
190 static const struct TransferCharacteristics *
192 {
193  const struct TransferCharacteristics *coeffs;
194 
195  if ((unsigned)trc >= FF_ARRAY_ELEMS(transfer_characteristics))
196  return NULL;
197  coeffs = &transfer_characteristics[trc];
198  if (!coeffs->alpha)
199  return NULL;
200 
201  return coeffs;
202 }
203 
205 {
206  int n;
207  double in_alpha = s->in_txchr->alpha, in_beta = s->in_txchr->beta;
208  double in_gamma = s->in_txchr->gamma, in_delta = s->in_txchr->delta;
209  double in_ialpha = 1.0 / in_alpha, in_igamma = 1.0 / in_gamma, in_idelta = 1.0 / in_delta;
210  double out_alpha = s->out_txchr->alpha, out_beta = s->out_txchr->beta;
211  double out_gamma = s->out_txchr->gamma, out_delta = s->out_txchr->delta;
212  int clip_gamut = s->clip_gamut == CLIP_GAMUT_RGB;
213 
214  s->lin_lut = av_malloc(sizeof(*s->lin_lut) * 32768 * 2);
215  if (!s->lin_lut)
216  return AVERROR(ENOMEM);
217  s->delin_lut = &s->lin_lut[32768];
218  for (n = 0; n < 32768; n++) {
219  double v = (n - 2048.0) / 28672.0, d, l;
220 
221  // delinearize
222  if (v <= -out_beta) {
223  d = -out_alpha * pow(-v, out_gamma) + (out_alpha - 1.0);
224  } else if (v < out_beta) {
225  d = out_delta * v;
226  } else {
227  d = out_alpha * pow(v, out_gamma) - (out_alpha - 1.0);
228  }
229  int d_rounded = lrint(d * 28672.0);
230  s->delin_lut[n] = clip_gamut ? av_clip(d_rounded, 0, 28672)
231  : av_clip_int16(d_rounded);
232 
233  // linearize
234  if (v <= -in_beta * in_delta) {
235  l = -pow((1.0 - in_alpha - v) * in_ialpha, in_igamma);
236  } else if (v < in_beta * in_delta) {
237  l = v * in_idelta;
238  } else {
239  l = pow((v + in_alpha - 1.0) * in_ialpha, in_igamma);
240  }
241  int l_rounded = lrint(l * 28672.0);
242  s->lin_lut[n] = clip_gamut ? av_clip(l_rounded, 0, 28672)
243  : av_clip_int16(l_rounded);
244  }
245 
246  return 0;
247 }
248 
249 /*
250  * See http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html
251  * This function uses the Bradford mechanism.
252  */
253 static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt,
254  const AVWhitepointCoefficients *wp_src,
255  const AVWhitepointCoefficients *wp_dst)
256 {
257  static const double ma_tbl[NB_WP_ADAPT_NON_IDENTITY][3][3] = {
258  [WP_ADAPT_BRADFORD] = {
259  { 0.8951, 0.2664, -0.1614 },
260  { -0.7502, 1.7135, 0.0367 },
261  { 0.0389, -0.0685, 1.0296 },
262  }, [WP_ADAPT_VON_KRIES] = {
263  { 0.40024, 0.70760, -0.08081 },
264  { -0.22630, 1.16532, 0.04570 },
265  { 0.00000, 0.00000, 0.91822 },
266  },
267  };
268  const double (*ma)[3] = ma_tbl[wp_adapt];
269  double xw_src = av_q2d(wp_src->x), yw_src = av_q2d(wp_src->y);
270  double xw_dst = av_q2d(wp_dst->x), yw_dst = av_q2d(wp_dst->y);
271  double zw_src = 1.0 - xw_src - yw_src;
272  double zw_dst = 1.0 - xw_dst - yw_dst;
273  double mai[3][3], fac[3][3], tmp[3][3];
274  double rs, gs, bs, rd, gd, bd;
275 
276  ff_matrix_invert_3x3(ma, mai);
277  rs = ma[0][0] * xw_src + ma[0][1] * yw_src + ma[0][2] * zw_src;
278  gs = ma[1][0] * xw_src + ma[1][1] * yw_src + ma[1][2] * zw_src;
279  bs = ma[2][0] * xw_src + ma[2][1] * yw_src + ma[2][2] * zw_src;
280  rd = ma[0][0] * xw_dst + ma[0][1] * yw_dst + ma[0][2] * zw_dst;
281  gd = ma[1][0] * xw_dst + ma[1][1] * yw_dst + ma[1][2] * zw_dst;
282  bd = ma[2][0] * xw_dst + ma[2][1] * yw_dst + ma[2][2] * zw_dst;
283  fac[0][0] = rd / rs;
284  fac[1][1] = gd / gs;
285  fac[2][2] = bd / bs;
286  fac[0][1] = fac[0][2] = fac[1][0] = fac[1][2] = fac[2][0] = fac[2][1] = 0.0;
287  ff_matrix_mul_3x3(tmp, ma, fac);
288  ff_matrix_mul_3x3(out, tmp, mai);
289 }
290 
/*
 * Remap all three planes in place through a 15-bit lookup table.
 * Sample values are biased by 2048 and clipped to [0, 32767] before
 * indexing, matching the layout produced by fill_gamma_table().
 */
static void apply_lut(int16_t *buf[3], ptrdiff_t stride,
                      int w, int h, const int16_t *lut)
{
    for (int p = 0; p < 3; p++) {
        int16_t *line = buf[p];

        for (int row = 0; row < h; row++, line += stride) {
            for (int col = 0; col < w; col++)
                line[col] = lut[av_clip_uintp2(2048 + line[col], 15)];
        }
    }
}
307 
308 typedef struct ThreadData {
309  AVFrame *in, *out;
310  ptrdiff_t in_linesize[3], out_linesize[3];
312 } ThreadData;
313 
314 static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
315 {
316  const ThreadData *td = data;
317  ColorSpaceContext *s = ctx->priv;
318  uint8_t *in_data[3], *out_data[3];
319  int16_t *rgb[3];
320  int h_in = (td->in->height + 1) >> 1;
321  int h1 = 2 * (job_nr * h_in / n_jobs), h2 = 2 * ((job_nr + 1) * h_in / n_jobs);
322  int w = td->in->width, h = h2 - h1;
323 
324  in_data[0] = td->in->data[0] + td->in_linesize[0] * h1;
325  in_data[1] = td->in->data[1] + td->in_linesize[1] * (h1 >> td->in_ss_h);
326  in_data[2] = td->in->data[2] + td->in_linesize[2] * (h1 >> td->in_ss_h);
327  out_data[0] = td->out->data[0] + td->out_linesize[0] * h1;
328  out_data[1] = td->out->data[1] + td->out_linesize[1] * (h1 >> td->out_ss_h);
329  out_data[2] = td->out->data[2] + td->out_linesize[2] * (h1 >> td->out_ss_h);
330  rgb[0] = s->rgb[0] + s->rgb_stride * h1;
331  rgb[1] = s->rgb[1] + s->rgb_stride * h1;
332  rgb[2] = s->rgb[2] + s->rgb_stride * h1;
333 
334  // FIXME for simd, also make sure we do pictures with negative stride
335  // top-down so we don't overwrite lines with padding of data before it
336  // in the same buffer (same as swscale)
337 
338  if (s->yuv2yuv_fastmode) {
339  // FIXME possibly use a fast mode in case only the y range changes?
340  // since in that case, only the diagonal entries in yuv2yuv_coeffs[]
341  // are non-zero
342  s->yuv2yuv(out_data, td->out_linesize, in_data, td->in_linesize, w, h,
343  s->yuv2yuv_coeffs, s->yuv_offset);
344  } else {
345  // FIXME maybe (for caching efficiency) do pipeline per-line instead of
346  // full buffer per function? (Or, since yuv2rgb requires 2 lines: per
347  // 2 lines, for yuv420.)
348  /*
349  * General design:
350  * - yuv2rgb converts from whatever range the input was ([16-235/240] or
351  * [0,255] or the 10/12bpp equivalents thereof) to an integer version
352  * of RGB in psuedo-restricted 15+sign bits. That means that the float
353  * range [0.0,1.0] is in [0,28762], and the remainder of the int16_t
354  * range is used for overflow/underflow outside the representable
355  * range of this RGB type. rgb2yuv is the exact opposite.
356  * - gamma correction is done using a LUT since that appears to work
357  * fairly fast.
358  * - If the input is chroma-subsampled (420/422), the yuv2rgb conversion
359  * (or rgb2yuv conversion) uses nearest-neighbour sampling to read
360  * read chroma pixels at luma resolution. If you want some more fancy
361  * filter, you can use swscale to convert to yuv444p.
362  * - all coefficients are 14bit (so in the [-2.0,2.0] range).
363  */
364  s->yuv2rgb(rgb, s->rgb_stride, in_data, td->in_linesize, w, h,
365  s->yuv2rgb_coeffs, s->yuv_offset[0]);
366  if (!s->rgb2rgb_passthrough) {
367  apply_lut(rgb, s->rgb_stride, w, h, s->lin_lut);
368  if (!s->lrgb2lrgb_passthrough)
369  s->dsp.multiply3x3(rgb, s->rgb_stride, w, h, s->lrgb2lrgb_coeffs);
370  apply_lut(rgb, s->rgb_stride, w, h, s->delin_lut);
371  }
372  if (s->dither == DITHER_FSB) {
373  s->rgb2yuv_fsb(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
374  s->rgb2yuv_coeffs, s->yuv_offset[1], s->dither_scratch);
375  } else {
376  s->rgb2yuv(out_data, td->out_linesize, rgb, s->rgb_stride, w, h,
377  s->rgb2yuv_coeffs, s->yuv_offset[1]);
378  }
379  }
380 
381  return 0;
382 }
383 
384 static int get_range_off(AVFilterContext *ctx, int *off,
385  int *y_rng, int *uv_rng,
386  enum AVColorRange rng, int depth)
387 {
388  switch (rng) {
390  ColorSpaceContext *s = ctx->priv;
391 
392  if (!s->did_warn_range) {
393  av_log(ctx, AV_LOG_WARNING, "Input range not set, assuming tv/mpeg\n");
394  s->did_warn_range = 1;
395  }
396  }
397  // fall-through
398  case AVCOL_RANGE_MPEG:
399  *off = 16 << (depth - 8);
400  *y_rng = 219 << (depth - 8);
401  *uv_rng = 224 << (depth - 8);
402  break;
403  case AVCOL_RANGE_JPEG:
404  *off = 0;
405  *y_rng = *uv_rng = (256 << (depth - 8)) - 1;
406  break;
407  default:
408  return AVERROR(EINVAL);
409  }
410 
411  return 0;
412 }
413 
415  const AVFrame *in, const AVFrame *out)
416 {
417  ColorSpaceContext *s = ctx->priv;
418  const AVPixFmtDescriptor *in_desc = av_pix_fmt_desc_get(in->format);
419  const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(out->format);
420  int m, n, o, res, fmt_identical, redo_yuv2rgb = 0, redo_rgb2yuv = 0;
421 
422 #define supported_depth(d) ((d) == 8 || (d) == 10 || (d) == 12)
423 #define supported_subsampling(lcw, lch) \
424  (((lcw) == 0 && (lch) == 0) || ((lcw) == 1 && (lch) == 0) || ((lcw) == 1 && (lch) == 1))
425 #define supported_format(d) \
426  ((d) != NULL && (d)->nb_components == 3 && \
427  !((d)->flags & AV_PIX_FMT_FLAG_RGB) && \
428  supported_depth((d)->comp[0].depth) && \
429  supported_subsampling((d)->log2_chroma_w, (d)->log2_chroma_h))
430 
431  if (!supported_format(in_desc)) {
433  "Unsupported input format %d (%s) or bitdepth (%d)\n",
435  in_desc ? in_desc->comp[0].depth : -1);
436  return AVERROR(EINVAL);
437  }
438  if (!supported_format(out_desc)) {
440  "Unsupported output format %d (%s) or bitdepth (%d)\n",
441  out->format, av_get_pix_fmt_name(out->format),
442  out_desc ? out_desc->comp[0].depth : -1);
443  return AVERROR(EINVAL);
444  }
445 
446  if (in->color_primaries != s->in_prm) s->in_primaries = NULL;
447  if (out->color_primaries != s->out_prm) s->out_primaries = NULL;
448  if (in->color_trc != s->in_trc) s->in_txchr = NULL;
449  if (out->color_trc != s->out_trc) s->out_txchr = NULL;
450  if (in->colorspace != s->in_csp ||
451  in->color_range != s->in_rng) s->in_lumacoef = NULL;
452  if (out->color_range != s->out_rng) s->rgb2yuv = NULL;
453 
454  if (!s->out_primaries || !s->in_primaries) {
455  s->in_prm = in->color_primaries;
456  if (s->user_iall != CS_UNSPECIFIED)
457  s->in_prm = default_prm[FFMIN(s->user_iall, CS_NB)];
458  if (s->user_iprm != AVCOL_PRI_UNSPECIFIED)
459  s->in_prm = s->user_iprm;
460  s->in_primaries = av_csp_primaries_desc_from_id(s->in_prm);
461  if (!s->in_primaries) {
463  "Unsupported input primaries %d (%s)\n",
464  s->in_prm, av_color_primaries_name(s->in_prm));
465  return AVERROR(EINVAL);
466  }
467  s->out_prm = out->color_primaries;
468  s->out_primaries = av_csp_primaries_desc_from_id(s->out_prm);
469  if (!s->out_primaries) {
470  if (s->out_prm == AVCOL_PRI_UNSPECIFIED) {
471  if (s->user_all == CS_UNSPECIFIED) {
472  av_log(ctx, AV_LOG_ERROR, "Please specify output primaries\n");
473  } else {
475  "Unsupported output color property %d\n", s->user_all);
476  }
477  } else {
479  "Unsupported output primaries %d (%s)\n",
480  s->out_prm, av_color_primaries_name(s->out_prm));
481  }
482  return AVERROR(EINVAL);
483  }
484  s->lrgb2lrgb_passthrough = !memcmp(s->in_primaries, s->out_primaries,
485  sizeof(*s->in_primaries));
486  if (!s->lrgb2lrgb_passthrough) {
487  double rgb2xyz[3][3], xyz2rgb[3][3], rgb2rgb[3][3];
488  const AVWhitepointCoefficients *wp_out, *wp_in;
489 
490  wp_out = &s->out_primaries->wp;
491  wp_in = &s->in_primaries->wp;
492  ff_fill_rgb2xyz_table(&s->out_primaries->prim, wp_out, rgb2xyz);
493  ff_matrix_invert_3x3(rgb2xyz, xyz2rgb);
494  ff_fill_rgb2xyz_table(&s->in_primaries->prim, wp_in, rgb2xyz);
495  if (memcmp(wp_in, wp_out, sizeof(*wp_in)) != 0 &&
496  s->wp_adapt != WP_ADAPT_IDENTITY) {
497  double wpconv[3][3], tmp[3][3];
498 
499  fill_whitepoint_conv_table(wpconv, s->wp_adapt, &s->in_primaries->wp,
500  &s->out_primaries->wp);
501  ff_matrix_mul_3x3(tmp, rgb2xyz, wpconv);
502  ff_matrix_mul_3x3(rgb2rgb, tmp, xyz2rgb);
503  } else {
504  ff_matrix_mul_3x3(rgb2rgb, rgb2xyz, xyz2rgb);
505  }
506  for (m = 0; m < 3; m++)
507  for (n = 0; n < 3; n++) {
508  s->lrgb2lrgb_coeffs[m][n][0] = lrint(16384.0 * rgb2rgb[m][n]);
509  for (o = 1; o < 8; o++)
510  s->lrgb2lrgb_coeffs[m][n][o] = s->lrgb2lrgb_coeffs[m][n][0];
511  }
512 
513  }
514  }
515 
516  if (!s->in_txchr) {
517  av_freep(&s->lin_lut);
518  s->in_trc = in->color_trc;
519  if (s->user_iall != CS_UNSPECIFIED)
520  s->in_trc = default_trc[FFMIN(s->user_iall, CS_NB)];
521  if (s->user_itrc != AVCOL_TRC_UNSPECIFIED)
522  s->in_trc = s->user_itrc;
523  s->in_txchr = get_transfer_characteristics(s->in_trc);
524  if (!s->in_txchr) {
526  "Unsupported input transfer characteristics %d (%s)\n",
527  s->in_trc, av_color_transfer_name(s->in_trc));
528  return AVERROR(EINVAL);
529  }
530  }
531 
532  if (!s->out_txchr) {
533  av_freep(&s->lin_lut);
534  s->out_trc = out->color_trc;
535  s->out_txchr = get_transfer_characteristics(s->out_trc);
536  if (!s->out_txchr) {
537  if (s->out_trc == AVCOL_TRC_UNSPECIFIED) {
538  if (s->user_all == CS_UNSPECIFIED) {
540  "Please specify output transfer characteristics\n");
541  } else {
543  "Unsupported output color property %d\n", s->user_all);
544  }
545  } else {
547  "Unsupported output transfer characteristics %d (%s)\n",
548  s->out_trc, av_color_transfer_name(s->out_trc));
549  }
550  return AVERROR(EINVAL);
551  }
552  }
553 
554  s->rgb2rgb_passthrough = s->fast_mode || (s->lrgb2lrgb_passthrough &&
555  !memcmp(s->in_txchr, s->out_txchr, sizeof(*s->in_txchr)));
556  if (!s->rgb2rgb_passthrough && !s->lin_lut) {
557  res = fill_gamma_table(s);
558  if (res < 0)
559  return res;
560  }
561 
562  if (!s->in_lumacoef) {
563  s->in_csp = in->colorspace;
564  if (s->user_iall != CS_UNSPECIFIED)
565  s->in_csp = default_csp[FFMIN(s->user_iall, CS_NB)];
566  if (s->user_icsp != AVCOL_SPC_UNSPECIFIED)
567  s->in_csp = s->user_icsp;
568  s->in_rng = in->color_range;
569  if (s->user_irng != AVCOL_RANGE_UNSPECIFIED)
570  s->in_rng = s->user_irng;
571  s->in_lumacoef = av_csp_luma_coeffs_from_avcsp(s->in_csp);
572  if (!s->in_lumacoef) {
574  "Unsupported input colorspace %d (%s)\n",
575  s->in_csp, av_color_space_name(s->in_csp));
576  return AVERROR(EINVAL);
577  }
578  redo_yuv2rgb = 1;
579  }
580 
581  if (!s->rgb2yuv) {
582  s->out_rng = out->color_range;
583  redo_rgb2yuv = 1;
584  }
585 
586  fmt_identical = in_desc->log2_chroma_h == out_desc->log2_chroma_h &&
587  in_desc->log2_chroma_w == out_desc->log2_chroma_w;
588  s->yuv2yuv_fastmode = s->rgb2rgb_passthrough && fmt_identical;
589  s->yuv2yuv_passthrough = s->yuv2yuv_fastmode && s->in_rng == s->out_rng &&
590  !memcmp(s->in_lumacoef, s->out_lumacoef,
591  sizeof(*s->in_lumacoef)) &&
592  in_desc->comp[0].depth == out_desc->comp[0].depth;
593  if (!s->yuv2yuv_passthrough) {
594  if (redo_yuv2rgb) {
595  double rgb2yuv[3][3], (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
596  int off, bits, in_rng;
597 
598  res = get_range_off(ctx, &off, &s->in_y_rng, &s->in_uv_rng,
599  s->in_rng, in_desc->comp[0].depth);
600  if (res < 0) {
602  "Unsupported input color range %d (%s)\n",
603  s->in_rng, av_color_range_name(s->in_rng));
604  return res;
605  }
606  for (n = 0; n < 8; n++)
607  s->yuv_offset[0][n] = off;
608  ff_fill_rgb2yuv_table(s->in_lumacoef, rgb2yuv);
610  bits = 1 << (in_desc->comp[0].depth - 1);
611  for (n = 0; n < 3; n++) {
612  for (in_rng = s->in_y_rng, m = 0; m < 3; m++, in_rng = s->in_uv_rng) {
613  s->yuv2rgb_coeffs[n][m][0] = lrint(28672 * bits * yuv2rgb[n][m] / in_rng);
614  for (o = 1; o < 8; o++)
615  s->yuv2rgb_coeffs[n][m][o] = s->yuv2rgb_coeffs[n][m][0];
616  }
617  }
618  av_assert2(s->yuv2rgb_coeffs[0][1][0] == 0);
619  av_assert2(s->yuv2rgb_coeffs[2][2][0] == 0);
620  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[1][0][0]);
621  av_assert2(s->yuv2rgb_coeffs[0][0][0] == s->yuv2rgb_coeffs[2][0][0]);
622  s->yuv2rgb = s->dsp.yuv2rgb[(in_desc->comp[0].depth - 8) >> 1]
623  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
624  }
625 
626  if (redo_rgb2yuv) {
627  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
628  int off, out_rng, bits;
629 
630  res = get_range_off(ctx, &off, &s->out_y_rng, &s->out_uv_rng,
631  s->out_rng, out_desc->comp[0].depth);
632  if (res < 0) {
634  "Unsupported output color range %d (%s)\n",
635  s->out_rng, av_color_range_name(s->out_rng));
636  return res;
637  }
638  for (n = 0; n < 8; n++)
639  s->yuv_offset[1][n] = off;
640  ff_fill_rgb2yuv_table(s->out_lumacoef, rgb2yuv);
641  bits = 1 << (29 - out_desc->comp[0].depth);
642  for (out_rng = s->out_y_rng, n = 0; n < 3; n++, out_rng = s->out_uv_rng) {
643  for (m = 0; m < 3; m++) {
644  s->rgb2yuv_coeffs[n][m][0] = lrint(bits * out_rng * rgb2yuv[n][m] / 28672);
645  for (o = 1; o < 8; o++)
646  s->rgb2yuv_coeffs[n][m][o] = s->rgb2yuv_coeffs[n][m][0];
647  }
648  }
649  av_assert2(s->rgb2yuv_coeffs[1][2][0] == s->rgb2yuv_coeffs[2][0][0]);
650  s->rgb2yuv = s->dsp.rgb2yuv[(out_desc->comp[0].depth - 8) >> 1]
651  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
652  s->rgb2yuv_fsb = s->dsp.rgb2yuv_fsb[(out_desc->comp[0].depth - 8) >> 1]
653  [out_desc->log2_chroma_h + out_desc->log2_chroma_w];
654  }
655 
656  if (s->yuv2yuv_fastmode && (redo_yuv2rgb || redo_rgb2yuv)) {
657  int idepth = in_desc->comp[0].depth, odepth = out_desc->comp[0].depth;
658  double (*rgb2yuv)[3] = s->rgb2yuv_dbl_coeffs;
659  double (*yuv2rgb)[3] = s->yuv2rgb_dbl_coeffs;
660  double yuv2yuv[3][3];
661  int in_rng, out_rng;
662 
664  for (out_rng = s->out_y_rng, m = 0; m < 3; m++, out_rng = s->out_uv_rng) {
665  for (in_rng = s->in_y_rng, n = 0; n < 3; n++, in_rng = s->in_uv_rng) {
666  s->yuv2yuv_coeffs[m][n][0] =
667  lrint(16384 * yuv2yuv[m][n] * out_rng * (1 << idepth) /
668  (in_rng * (1 << odepth)));
669  for (o = 1; o < 8; o++)
670  s->yuv2yuv_coeffs[m][n][o] = s->yuv2yuv_coeffs[m][n][0];
671  }
672  }
673  av_assert2(s->yuv2yuv_coeffs[1][0][0] == 0);
674  av_assert2(s->yuv2yuv_coeffs[2][0][0] == 0);
675  s->yuv2yuv = s->dsp.yuv2yuv[(idepth - 8) >> 1][(odepth - 8) >> 1]
676  [in_desc->log2_chroma_h + in_desc->log2_chroma_w];
677  }
678  }
679 
680  return 0;
681 }
682 
684 {
685  ColorSpaceContext *s = ctx->priv;
686 
687  s->out_csp = s->user_csp == AVCOL_SPC_UNSPECIFIED ?
688  default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp;
689  s->out_lumacoef = av_csp_luma_coeffs_from_avcsp(s->out_csp);
690  if (!s->out_lumacoef) {
691  if (s->out_csp == AVCOL_SPC_UNSPECIFIED) {
692  if (s->user_all == CS_UNSPECIFIED) {
694  "Please specify output colorspace\n");
695  } else {
697  "Unsupported output color property %d\n", s->user_all);
698  }
699  } else {
701  "Unsupported output colorspace %d (%s)\n", s->out_csp,
702  av_color_space_name(s->out_csp));
703  }
704  return AVERROR(EINVAL);
705  }
706 
707  ff_colorspacedsp_init(&s->dsp);
708 
709  return 0;
710 }
711 
713 {
714  ColorSpaceContext *s = ctx->priv;
715 
716  av_freep(&s->rgb[0]);
717  av_freep(&s->rgb[1]);
718  av_freep(&s->rgb[2]);
719  s->rgb_sz = 0;
720  av_freep(&s->dither_scratch_base[0][0]);
721  av_freep(&s->dither_scratch_base[0][1]);
722  av_freep(&s->dither_scratch_base[1][0]);
723  av_freep(&s->dither_scratch_base[1][1]);
724  av_freep(&s->dither_scratch_base[2][0]);
725  av_freep(&s->dither_scratch_base[2][1]);
726 
727  av_freep(&s->lin_lut);
728 }
729 
731 {
732  AVFilterContext *ctx = link->dst;
733  AVFilterLink *outlink = ctx->outputs[0];
734  ColorSpaceContext *s = ctx->priv;
735  // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the
736  // input one if it is writable *OR* the actual literal values of in_*
737  // and out_* are identical (not just their respective properties)
738  AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
739  int res;
740  ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32);
741  unsigned rgb_sz = rgb_stride * in->height;
742  ThreadData td;
743 
744  if (!out) {
745  av_frame_free(&in);
746  return AVERROR(ENOMEM);
747  }
748  res = av_frame_copy_props(out, in);
749  if (res < 0) {
750  av_frame_free(&in);
751  av_frame_free(&out);
752  return res;
753  }
754 
755  out->colorspace = s->out_csp;
756  out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ?
757  in->color_range : s->user_rng;
758  out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ?
759  default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm;
760  if (s->user_trc == AVCOL_TRC_UNSPECIFIED) {
762 
763  out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)];
764  if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12)
765  out->color_trc = AVCOL_TRC_BT2020_12;
766  } else {
767  out->color_trc = s->user_trc;
768  }
769 
770  if (out->color_primaries != in->color_primaries || out->color_trc != in->color_trc) {
771  av_frame_side_data_remove_by_props(&out->side_data, &out->nb_side_data,
773  }
774 
775  if (rgb_sz != s->rgb_sz) {
777  int uvw = in->width >> desc->log2_chroma_w;
778 
779  av_freep(&s->rgb[0]);
780  av_freep(&s->rgb[1]);
781  av_freep(&s->rgb[2]);
782  s->rgb_sz = 0;
783  av_freep(&s->dither_scratch_base[0][0]);
784  av_freep(&s->dither_scratch_base[0][1]);
785  av_freep(&s->dither_scratch_base[1][0]);
786  av_freep(&s->dither_scratch_base[1][1]);
787  av_freep(&s->dither_scratch_base[2][0]);
788  av_freep(&s->dither_scratch_base[2][1]);
789 
790  s->rgb[0] = av_malloc(rgb_sz);
791  s->rgb[1] = av_malloc(rgb_sz);
792  s->rgb[2] = av_malloc(rgb_sz);
793  s->dither_scratch_base[0][0] =
794  av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4));
795  s->dither_scratch_base[0][1] =
796  av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4));
797  s->dither_scratch_base[1][0] =
798  av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4));
799  s->dither_scratch_base[1][1] =
800  av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4));
801  s->dither_scratch_base[2][0] =
802  av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4));
803  s->dither_scratch_base[2][1] =
804  av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4));
805  s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1];
806  s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1];
807  s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1];
808  s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1];
809  s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1];
810  s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1];
811  if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] ||
812  !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] ||
813  !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] ||
814  !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) {
815  uninit(ctx);
816  av_frame_free(&in);
817  av_frame_free(&out);
818  return AVERROR(ENOMEM);
819  }
820  s->rgb_sz = rgb_sz;
821  }
822  res = create_filtergraph(ctx, in, out);
823  if (res < 0) {
824  av_frame_free(&in);
825  av_frame_free(&out);
826  return res;
827  }
828  s->rgb_stride = rgb_stride / sizeof(int16_t);
829  td.in = in;
830  td.out = out;
831  td.in_linesize[0] = in->linesize[0];
832  td.in_linesize[1] = in->linesize[1];
833  td.in_linesize[2] = in->linesize[2];
834  td.out_linesize[0] = out->linesize[0];
835  td.out_linesize[1] = out->linesize[1];
836  td.out_linesize[2] = out->linesize[2];
839  if (s->yuv2yuv_passthrough) {
840  res = av_frame_copy(out, in);
841  if (res < 0) {
842  av_frame_free(&in);
843  av_frame_free(&out);
844  return res;
845  }
846  } else {
848  FFMIN((in->height + 1) >> 1, ff_filter_get_nb_threads(ctx)));
849  }
850  av_frame_free(&in);
851 
852  return ff_filter_frame(outlink, out);
853 }
854 
856  AVFilterFormatsConfig **cfg_in,
857  AVFilterFormatsConfig **cfg_out)
858 {
859  static const enum AVPixelFormat pix_fmts[] = {
865  };
866  int res;
867  const ColorSpaceContext *s = ctx->priv;
869 
870  res = ff_formats_ref(ff_make_formats_list_singleton(s->out_csp), &cfg_out[0]->color_spaces);
871  if (res < 0)
872  return res;
873  if (s->user_rng != AVCOL_RANGE_UNSPECIFIED) {
874  res = ff_formats_ref(ff_make_formats_list_singleton(s->user_rng), &cfg_out[0]->color_ranges);
875  if (res < 0)
876  return res;
877  }
878 
880  if (!formats)
881  return AVERROR(ENOMEM);
882  if (s->user_format == AV_PIX_FMT_NONE)
883  return ff_set_common_formats2(ctx, cfg_in, cfg_out, formats);
884 
885  res = ff_formats_ref(formats, &cfg_in[0]->formats);
886  if (res < 0)
887  return res;
888 
889  formats = NULL;
890  res = ff_add_format(&formats, s->user_format);
891  if (res < 0)
892  return res;
893 
894  return ff_formats_ref(formats, &cfg_out[0]->formats);
895 }
896 
897 static int config_props(AVFilterLink *outlink)
898 {
899  AVFilterContext *ctx = outlink->dst;
900  AVFilterLink *inlink = outlink->src->inputs[0];
901 
902  if (inlink->w % 2 || inlink->h % 2) {
903  av_log(ctx, AV_LOG_ERROR, "Invalid odd size (%dx%d)\n",
904  inlink->w, inlink->h);
905  return AVERROR_PATCHWELCOME;
906  }
907 
908  outlink->w = inlink->w;
909  outlink->h = inlink->h;
910  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
911  outlink->time_base = inlink->time_base;
912 
913  return 0;
914 }
915 
916 #define OFFSET(x) offsetof(ColorSpaceContext, x)
917 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
918 #define ENUM(x, y, z) { x, "", 0, AV_OPT_TYPE_CONST, { .i64 = y }, INT_MIN, INT_MAX, FLAGS, .unit = z }
919 
920 static const AVOption colorspace_options[] = {
921  { "all", "Set all color properties together",
922  OFFSET(user_all), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
923  CS_UNSPECIFIED, CS_NB - 1, FLAGS, .unit = "all" },
924  ENUM("bt470m", CS_BT470M, "all"),
925  ENUM("bt470bg", CS_BT470BG, "all"),
926  ENUM("bt601-6-525", CS_BT601_6_525, "all"),
927  ENUM("bt601-6-625", CS_BT601_6_625, "all"),
928  ENUM("bt709", CS_BT709, "all"),
929  ENUM("smpte170m", CS_SMPTE170M, "all"),
930  ENUM("smpte240m", CS_SMPTE240M, "all"),
931  ENUM("bt2020", CS_BT2020, "all"),
932 
933  { "space", "Output colorspace",
934  OFFSET(user_csp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
935  AVCOL_SPC_RGB, AVCOL_SPC_NB - 1, FLAGS, .unit = "csp"},
936  ENUM("bt709", AVCOL_SPC_BT709, "csp"),
937  ENUM("fcc", AVCOL_SPC_FCC, "csp"),
938  ENUM("bt470bg", AVCOL_SPC_BT470BG, "csp"),
939  ENUM("smpte170m", AVCOL_SPC_SMPTE170M, "csp"),
940  ENUM("smpte240m", AVCOL_SPC_SMPTE240M, "csp"),
941  ENUM("ycgco", AVCOL_SPC_YCGCO, "csp"),
942  ENUM("gbr", AVCOL_SPC_RGB, "csp"),
943  ENUM("bt2020nc", AVCOL_SPC_BT2020_NCL, "csp"),
944  ENUM("bt2020ncl", AVCOL_SPC_BT2020_NCL, "csp"),
945 
946  { "range", "Output color range",
947  OFFSET(user_rng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
948  AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, .unit = "rng" },
949  ENUM("tv", AVCOL_RANGE_MPEG, "rng"),
950  ENUM("mpeg", AVCOL_RANGE_MPEG, "rng"),
951  ENUM("pc", AVCOL_RANGE_JPEG, "rng"),
952  ENUM("jpeg", AVCOL_RANGE_JPEG, "rng"),
953 
954  { "primaries", "Output color primaries",
955  OFFSET(user_prm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
956  AVCOL_PRI_RESERVED0, AVCOL_PRI_EXT_NB - 1, FLAGS, .unit = "prm" },
957  ENUM("bt709", AVCOL_PRI_BT709, "prm"),
958  ENUM("bt470m", AVCOL_PRI_BT470M, "prm"),
959  ENUM("bt470bg", AVCOL_PRI_BT470BG, "prm"),
960  ENUM("smpte170m", AVCOL_PRI_SMPTE170M, "prm"),
961  ENUM("smpte240m", AVCOL_PRI_SMPTE240M, "prm"),
962  ENUM("smpte428", AVCOL_PRI_SMPTE428, "prm"),
963  ENUM("film", AVCOL_PRI_FILM, "prm"),
964  ENUM("smpte431", AVCOL_PRI_SMPTE431, "prm"),
965  ENUM("smpte432", AVCOL_PRI_SMPTE432, "prm"),
966  ENUM("bt2020", AVCOL_PRI_BT2020, "prm"),
967  ENUM("jedec-p22", AVCOL_PRI_JEDEC_P22, "prm"),
968  ENUM("ebu3213", AVCOL_PRI_EBU3213, "prm"),
969  ENUM("vgamut", AVCOL_PRI_V_GAMUT, "prm"),
970 
971  { "trc", "Output transfer characteristics",
972  OFFSET(user_trc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
973  AVCOL_TRC_RESERVED0, AVCOL_TRC_EXT_NB - 1, FLAGS, .unit = "trc" },
974  ENUM("bt709", AVCOL_TRC_BT709, "trc"),
975  ENUM("bt470m", AVCOL_TRC_GAMMA22, "trc"),
976  ENUM("gamma22", AVCOL_TRC_GAMMA22, "trc"),
977  ENUM("bt470bg", AVCOL_TRC_GAMMA28, "trc"),
978  ENUM("gamma28", AVCOL_TRC_GAMMA28, "trc"),
979  ENUM("smpte170m", AVCOL_TRC_SMPTE170M, "trc"),
980  ENUM("smpte240m", AVCOL_TRC_SMPTE240M, "trc"),
981  ENUM("linear", AVCOL_TRC_LINEAR, "trc"),
982  ENUM("srgb", AVCOL_TRC_IEC61966_2_1, "trc"),
983  ENUM("iec61966-2-1", AVCOL_TRC_IEC61966_2_1, "trc"),
984  ENUM("xvycc", AVCOL_TRC_IEC61966_2_4, "trc"),
985  ENUM("iec61966-2-4", AVCOL_TRC_IEC61966_2_4, "trc"),
986  ENUM("bt2020-10", AVCOL_TRC_BT2020_10, "trc"),
987  ENUM("bt2020-12", AVCOL_TRC_BT2020_12, "trc"),
988  ENUM("vlog", AVCOL_TRC_V_LOG, "trc"),
989 
990  { "format", "Output pixel format",
991  OFFSET(user_format), AV_OPT_TYPE_INT, { .i64 = AV_PIX_FMT_NONE },
992  AV_PIX_FMT_NONE, AV_PIX_FMT_GBRAP12LE, FLAGS, .unit = "fmt" },
993  ENUM("yuv420p", AV_PIX_FMT_YUV420P, "fmt"),
994  ENUM("yuv420p10", AV_PIX_FMT_YUV420P10, "fmt"),
995  ENUM("yuv420p12", AV_PIX_FMT_YUV420P12, "fmt"),
996  ENUM("yuv422p", AV_PIX_FMT_YUV422P, "fmt"),
997  ENUM("yuv422p10", AV_PIX_FMT_YUV422P10, "fmt"),
998  ENUM("yuv422p12", AV_PIX_FMT_YUV422P12, "fmt"),
999  ENUM("yuv444p", AV_PIX_FMT_YUV444P, "fmt"),
1000  ENUM("yuv444p10", AV_PIX_FMT_YUV444P10, "fmt"),
1001  ENUM("yuv444p12", AV_PIX_FMT_YUV444P12, "fmt"),
1002 
1003  { "fast", "Ignore primary chromaticity and gamma correction",
1004  OFFSET(fast_mode), AV_OPT_TYPE_BOOL, { .i64 = 0 },
1005  0, 1, FLAGS },
1006 
1007  { "dither", "Dithering mode",
1008  OFFSET(dither), AV_OPT_TYPE_INT, { .i64 = DITHER_NONE },
1009  DITHER_NONE, DITHER_NB - 1, FLAGS, .unit = "dither" },
1010  ENUM("none", DITHER_NONE, "dither"),
1011  ENUM("fsb", DITHER_FSB, "dither"),
1012 
1013  { "wpadapt", "Whitepoint adaptation method",
1014  OFFSET(wp_adapt), AV_OPT_TYPE_INT, { .i64 = WP_ADAPT_BRADFORD },
1015  WP_ADAPT_BRADFORD, NB_WP_ADAPT - 1, FLAGS, .unit = "wpadapt" },
1016  ENUM("bradford", WP_ADAPT_BRADFORD, "wpadapt"),
1017  ENUM("vonkries", WP_ADAPT_VON_KRIES, "wpadapt"),
1018  ENUM("identity", WP_ADAPT_IDENTITY, "wpadapt"),
1019 
1020  { "clipgamut",
1021  "Controls how to clip out-of-gamut colors that arise as a result of colorspace conversion.",
1022  OFFSET(clip_gamut), AV_OPT_TYPE_INT, { .i64 = CLIP_GAMUT_NONE },
1023  CLIP_GAMUT_NONE, NB_CLIP_GAMUT - 1, FLAGS, .unit = "clipgamut" },
1024  ENUM("none", CLIP_GAMUT_NONE, "clipgamut"),
1025  ENUM("rgb", CLIP_GAMUT_RGB, "clipgamut"),
1026 
1027  { "iall", "Set all input color properties together",
1028  OFFSET(user_iall), AV_OPT_TYPE_INT, { .i64 = CS_UNSPECIFIED },
1029  CS_UNSPECIFIED, CS_NB - 1, FLAGS, .unit = "all" },
1030  { "ispace", "Input colorspace",
1031  OFFSET(user_icsp), AV_OPT_TYPE_INT, { .i64 = AVCOL_SPC_UNSPECIFIED },
1032  AVCOL_PRI_RESERVED0, AVCOL_PRI_NB - 1, FLAGS, .unit = "csp" },
1033  { "irange", "Input color range",
1034  OFFSET(user_irng), AV_OPT_TYPE_INT, { .i64 = AVCOL_RANGE_UNSPECIFIED },
1035  AVCOL_RANGE_UNSPECIFIED, AVCOL_RANGE_NB - 1, FLAGS, .unit = "rng" },
1036  { "iprimaries", "Input color primaries",
1037  OFFSET(user_iprm), AV_OPT_TYPE_INT, { .i64 = AVCOL_PRI_UNSPECIFIED },
1038  AVCOL_PRI_RESERVED0, AVCOL_PRI_EXT_NB - 1, FLAGS, .unit = "prm" },
1039  { "itrc", "Input transfer characteristics",
1040  OFFSET(user_itrc), AV_OPT_TYPE_INT, { .i64 = AVCOL_TRC_UNSPECIFIED },
1041  AVCOL_TRC_RESERVED0, AVCOL_TRC_EXT_NB - 1, FLAGS, .unit = "trc" },
1042 
1043  { NULL }
1044 };
1045 
/* Defines colorspace_class (the AVClass referenced by the filter below),
 * binding it to colorspace_options. */
AVFILTER_DEFINE_CLASS(colorspace);
1047 
1048 static const AVFilterPad inputs[] = {
1049  {
1050  .name = "default",
1051  .type = AVMEDIA_TYPE_VIDEO,
1052  .filter_frame = filter_frame,
1053  },
1054 };
1055 
1056 static const AVFilterPad outputs[] = {
1057  {
1058  .name = "default",
1059  .type = AVMEDIA_TYPE_VIDEO,
1060  .config_props = config_props,
1061  },
1062 };
1063 
1065  .p.name = "colorspace",
1066  .p.description = NULL_IF_CONFIG_SMALL("Convert between colorspaces."),
1067  .p.priv_class = &colorspace_class,
1069  .init = init,
1070  .uninit = uninit,
1071  .priv_size = sizeof(ColorSpaceContext),
1075 };
formats
formats
Definition: signature.h:47
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:118
AVFrame::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:682
ColorSpaceContext::fast_mode
int fast_mode
Definition: vf_colorspace.c:129
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:678
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
NB_CLIP_GAMUT
@ NB_CLIP_GAMUT
Definition: vf_colorspace.c:72
ColorSpaceContext::yuv2yuv_passthrough
int yuv2yuv_passthrough
Definition: vf_colorspace.c:151
AVCOL_PRI_EBU3213
@ AVCOL_PRI_EBU3213
EBU Tech. 3213-E (nothing there) / one of JEDEC P22 group phosphors.
Definition: pixfmt.h:652
av_clip
#define av_clip
Definition: common.h:100
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ColorSpaceContext::rgb2yuv_fsb
rgb2yuv_fsb_fn rgb2yuv_fsb
Definition: vf_colorspace.c:158
WP_ADAPT_VON_KRIES
@ WP_ADAPT_VON_KRIES
Definition: vf_colorspace.c:63
ColorSpaceContext::user_format
enum AVPixelFormat in_format user_format
Definition: vf_colorspace.c:128
ColorSpaceContext::delin_lut
int16_t * delin_lut
Definition: vf_colorspace.c:148
AVColorTransferCharacteristic
AVColorTransferCharacteristic
Color Transfer Characteristic.
Definition: pixfmt.h:666
mem_internal.h
out
static FILE * out
Definition: movenc.c:55
AVColorPrimariesDesc
Struct that contains both white point location and primaries location, providing the complete descrip...
Definition: csp.h:78
NB_WP_ADAPT
@ NB_WP_ADAPT
Definition: vf_colorspace.c:66
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1067
ColorSpaceContext::dither_scratch_base
int * dither_scratch_base[3][2]
Definition: vf_colorspace.c:140
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3456
ff_matrix_invert_3x3
void ff_matrix_invert_3x3(const double in[3][3], double out[3][3])
Definition: colorspace.c:27
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:675
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:124
ColorSpaceContext::yuv2rgb
yuv2rgb_fn yuv2rgb
Definition: vf_colorspace.c:156
ff_set_common_formats2
int ff_set_common_formats2(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out, AVFilterFormats *formats)
Definition: formats.c:1136
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
ColorSpaceContext::out_txchr
const struct TransferCharacteristics * out_txchr
Definition: vf_colorspace.c:146
CS_SMPTE240M
@ CS_SMPTE240M
Definition: vf_colorspace.c:56
AVFrame::color_primaries
enum AVColorPrimaries color_primaries
Definition: frame.h:680
TransferCharacteristics::gamma
double gamma
Definition: vf_colorspace.c:115
WP_ADAPT_BRADFORD
@ WP_ADAPT_BRADFORD
Definition: vf_colorspace.c:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:689
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: filters.h:264
ColorSpaceContext::rgb_sz
unsigned rgb_sz
Definition: vf_colorspace.c:139
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(colorspace)
fill_whitepoint_conv_table
static void fill_whitepoint_conv_table(double out[3][3], enum WhitepointAdaptation wp_adapt, const AVWhitepointCoefficients *wp_src, const AVWhitepointCoefficients *wp_dst)
Definition: vf_colorspace.c:253
pixdesc.h
AVFrame::width
int width
Definition: frame.h:499
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:777
av_csp_luma_coeffs_from_avcsp
const struct AVLumaCoefficients * av_csp_luma_coeffs_from_avcsp(enum AVColorSpace csp)
Retrieves the Luma coefficients necessary to construct a conversion matrix from an enum constant desc...
Definition: csp.c:58
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
AVOption
AVOption.
Definition: opt.h:429
AVCOL_SPC_NB
@ AVCOL_SPC_NB
Not part of ABI.
Definition: pixfmt.h:720
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:669
ff_make_pixel_format_list
av_warn_unused_result AVFilterFormats * ff_make_pixel_format_list(const enum AVPixelFormat *fmts)
Create a list of supported pixel formats.
data
const char data[16]
Definition: mxf.c:149
rgb2yuv
static const char rgb2yuv[]
Definition: vf_scale_vulkan.c:84
ColorSpaceContext::rgb2yuv_dbl_coeffs
double rgb2yuv_dbl_coeffs[3][3]
Definition: vf_colorspace.c:160
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:539
AVCOL_PRI_JEDEC_P22
@ AVCOL_PRI_JEDEC_P22
Definition: pixfmt.h:653
ThreadData::out_ss_h
int out_ss_h
Definition: vf_colorspace.c:311
ColorSpaceContext::in_uv_rng
int in_uv_rng
Definition: vf_colorspace.c:161
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:701
AVCOL_TRC_BT2020_12
@ AVCOL_TRC_BT2020_12
ITU-R BT2020 for 12-bit system.
Definition: pixfmt.h:682
AVLumaCoefficients
Struct containing luma coefficients to be used for RGB to YUV/YCoCg, or similar calculations.
Definition: csp.h:48
CS_BT709
@ CS_BT709
Definition: vf_colorspace.c:54
WP_ADAPT_IDENTITY
@ WP_ADAPT_IDENTITY
Definition: vf_colorspace.c:65
ColorSpaceContext::lrgb2lrgb_coeffs
int16_t lrgb2lrgb_coeffs[3][3][8]
Definition: vf_colorspace.c:144
AVColorPrimaries
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Definition: pixfmt.h:636
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:220
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:526
get_transfer_characteristics
static const struct TransferCharacteristics * get_transfer_characteristics(enum AVColorTransferCharacteristic trc)
Definition: vf_colorspace.c:191
video.h
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:155
ff_make_formats_list_singleton
AVFilterFormats * ff_make_formats_list_singleton(int fmt)
Equivalent to ff_make_format_list({const int[]}{ fmt, -1 })
Definition: formats.c:595
colorspace_options
static const AVOption colorspace_options[]
Definition: vf_colorspace.c:920
Colorspace
Colorspace
Definition: vf_colorspace.c:48
ColorSpaceContext::rgb2rgb_passthrough
int rgb2rgb_passthrough
Definition: vf_colorspace.c:147
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
AV_PIX_FMT_GBRAP12LE
@ AV_PIX_FMT_GBRAP12LE
planar GBR 4:4:4:4 48bpp, little-endian
Definition: pixfmt.h:311
DITHER_FSB
@ DITHER_FSB
Definition: vf_colorspace.c:44
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
rgb
Definition: rpzaenc.c:60
AVCOL_TRC_IEC61966_2_1
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
Definition: pixfmt.h:680
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3856
ThreadData::out_linesize
ptrdiff_t out_linesize[3]
Definition: vf_colorspace.c:310
colorspace.h
AVCOL_RANGE_NB
@ AVCOL_RANGE_NB
Not part of ABI.
Definition: pixfmt.h:778
AVCOL_TRC_GAMMA28
@ AVCOL_TRC_GAMMA28
also ITU-R BT470BG
Definition: pixfmt.h:672
ColorSpaceContext
Definition: vf_colorspace.c:118
CS_BT2020
@ CS_BT2020
Definition: vf_colorspace.c:57
CS_BT601_6_525
@ CS_BT601_6_525
Definition: vf_colorspace.c:52
AVCOL_TRC_GAMMA22
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:671
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:40
ColorSpaceContext::yuv_offset
int16_t yuv_offset[2][8]
Definition: vf_colorspace.c:155
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:542
avassert.h
lrint
#define lrint
Definition: tablegen.h:53
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
ColorSpaceContext::wp_adapt
int wp_adapt
Definition: vf_colorspace.c:133
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
transfer_characteristics
static const struct TransferCharacteristics transfer_characteristics[]
Definition: vf_colorspace.c:177
FFFilter
Definition: filters.h:267
AVCOL_PRI_RESERVED0
@ AVCOL_PRI_RESERVED0
Definition: pixfmt.h:637
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
dither
static const uint16_t dither[8][8]
Definition: vf_gradfun.c:46
s
#define s(width, name)
Definition: cbs_vp9.c:198
DITHER_NB
@ DITHER_NB
Definition: vf_colorspace.c:45
AVCOL_PRI_NB
@ AVCOL_PRI_NB
Not part of ABI.
Definition: pixfmt.h:654
av_csp_primaries_desc_from_id
const AVColorPrimariesDesc * av_csp_primaries_desc_from_id(enum AVColorPrimaries prm)
Retrieves a complete gamut description from an enum constant describing the color primaries.
Definition: csp.c:95
CS_BT470BG
@ CS_BT470BG
Definition: vf_colorspace.c:51
CS_UNSPECIFIED
@ CS_UNSPECIFIED
Definition: vf_colorspace.c:49
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:707
ColorSpaceContext::yuv2rgb_coeffs
int16_t yuv2rgb_coeffs[3][3][8]
Definition: vf_colorspace.c:152
get_range_off
static int get_range_off(AVFilterContext *ctx, int *off, int *y_rng, int *uv_rng, enum AVColorRange rng, int depth)
Definition: vf_colorspace.c:384
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:755
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ColorSpaceDSPContext
Definition: colorspacedsp.h:59
bits
uint8_t bits
Definition: vp3data.h:128
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_colorspace.c:730
filters.h
default_trc
static enum AVColorTransferCharacteristic default_trc[CS_NB+1]
Definition: vf_colorspace.c:75
ctx
static AVFormatContext * ctx
Definition: movenc.c:49
AVCOL_PRI_SMPTE428
@ AVCOL_PRI_SMPTE428
SMPTE ST 428-1 (CIE 1931 XYZ)
Definition: pixfmt.h:648
AVFilterFormatsConfig::color_spaces
AVFilterFormats * color_spaces
Lists of supported YUV color metadata, only for YUV video.
Definition: avfilter.h:141
AVPixFmtDescriptor::log2_chroma_w
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCOL_PRI_SMPTE240M
@ AVCOL_PRI_SMPTE240M
identical to above, also called "SMPTE C" even though it uses D65
Definition: pixfmt.h:645
FILTER_OUTPUTS
#define FILTER_OUTPUTS(array)
Definition: filters.h:265
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:639
ColorSpaceContext::yuv2yuv
yuv2yuv_fn yuv2yuv
Definition: vf_colorspace.c:159
tmp
static uint8_t tmp[40]
Definition: aes_ctr.c:52
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:643
ColorSpaceContext::rgb2yuv_coeffs
int16_t rgb2yuv_coeffs[3][3][8]
Definition: vf_colorspace.c:153
AVCOL_PRI_SMPTE170M
@ AVCOL_PRI_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:644
ColorSpaceContext::dither
int dither
Definition: vf_colorspace.c:131
ColorSpaceContext::user_irng
enum AVColorRange in_rng out_rng user_rng user_irng
Definition: vf_colorspace.c:125
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_colorspace.c:683
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3772
ColorSpaceContext::yuv2yuv_coeffs
int16_t yuv2yuv_coeffs[3][3][8]
Definition: vf_colorspace.c:154
ff_matrix_mul_3x3
void ff_matrix_mul_3x3(double dst[3][3], const double src1[3][3], const double src2[3][3])
Definition: colorspace.c:54
config_props
static int config_props(AVFilterLink *outlink)
Definition: vf_colorspace.c:897
CS_NB
@ CS_NB
Definition: vf_colorspace.c:58
AVCOL_TRC_RESERVED0
@ AVCOL_TRC_RESERVED0
Definition: pixfmt.h:667
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
TransferCharacteristics::alpha
double alpha
Definition: vf_colorspace.c:115
av_clip_int16
#define av_clip_int16
Definition: common.h:115
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:599
CS_SMPTE170M
@ CS_SMPTE170M
Definition: vf_colorspace.c:55
ColorSpaceContext::user_itrc
enum AVColorTransferCharacteristic in_trc out_trc user_trc user_itrc
Definition: vf_colorspace.c:126
AVCOL_TRC_IEC61966_2_4
@ AVCOL_TRC_IEC61966_2_4
IEC 61966-2-4.
Definition: pixfmt.h:678
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVFilterContext::inputs
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:282
AVCOL_PRI_BT709
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
Definition: pixfmt.h:638
ff_add_format
int ff_add_format(AVFilterFormats **avff, int64_t fmt)
Add fmt to the list of media formats contained in *avff.
Definition: formats.c:570
fill_gamma_table
static int fill_gamma_table(ColorSpaceContext *s)
Definition: vf_colorspace.c:204
ColorSpaceContext::lin_lut
int16_t * lin_lut
Definition: vf_colorspace.c:148
CLIP_GAMUT_NONE
@ CLIP_GAMUT_NONE
Definition: vf_colorspace.c:70
av_color_primaries_name
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:3790
double
double
Definition: af_crystalizer.c:132
AVCOL_TRC_BT2020_10
@ AVCOL_TRC_BT2020_10
ITU-R BT2020 for 10-bit system.
Definition: pixfmt.h:681
AVCOL_SPC_YCGCO
@ AVCOL_SPC_YCGCO
used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
Definition: pixfmt.h:709
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:540
ColorSpaceContext::in_txchr
const struct TransferCharacteristics * in_txchr
Definition: vf_colorspace.c:146
AVCIExy
Struct containing chromaticity x and y values for the standard CIE 1931 chromaticity definition.
Definition: csp.h:56
ColorSpaceContext::user_iall
enum Colorspace user_all user_iall
Definition: vf_colorspace.c:123
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:743
AVFilterFormatsConfig
Lists of formats / etc.
Definition: avfilter.h:121
CLIP_GAMUT_RGB
@ CLIP_GAMUT_RGB
Definition: vf_colorspace.c:71
AVCOL_PRI_BT2020
@ AVCOL_PRI_BT2020
ITU-R BT2020.
Definition: pixfmt.h:647
uninit
static void uninit(AVFilterContext *ctx)
Definition: vf_colorspace.c:712
ColorSpaceContext::out_y_rng
int out_y_rng
Definition: vf_colorspace.c:161
AVCIExy::x
AVRational x
Definition: csp.h:57
ColorSpaceContext::lrgb2lrgb_passthrough
int lrgb2lrgb_passthrough
Definition: vf_colorspace.c:143
AVCOL_PRI_SMPTE431
@ AVCOL_PRI_SMPTE431
SMPTE ST 431-2 (2011) / DCI P3.
Definition: pixfmt.h:650
yuv2yuv_fn
void(* yuv2yuv_fn)(uint8_t *yuv_out[3], const ptrdiff_t yuv_out_stride[3], uint8_t *yuv_in[3], const ptrdiff_t yuv_in_stride[3], int w, int h, const int16_t yuv2yuv_coeffs[3][3][8], const int16_t yuv_offset[2][8])
Definition: colorspacedsp.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[4][4]
Definition: lcevc_parser.c:75
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
AVCOL_TRC_SMPTE240M
@ AVCOL_TRC_SMPTE240M
Definition: pixfmt.h:674
AVCOL_PRI_FILM
@ AVCOL_PRI_FILM
colour filters using Illuminant C
Definition: pixfmt.h:646
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
av_frame_copy
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:711
ColorSpaceContext::yuv2yuv_fastmode
int yuv2yuv_fastmode
Definition: vf_colorspace.c:151
OFFSET
#define OFFSET(x)
Definition: vf_colorspace.c:916
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:544
xyz2rgb
static const float xyz2rgb[3][3]
Definition: tiff.c:1896
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:546
TransferCharacteristics
Definition: vf_colorspace.c:114
ColorSpaceContext::rgb2yuv
rgb2yuv_fn rgb2yuv
Definition: vf_colorspace.c:157
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
frame.h
ColorSpaceContext::clip_gamut
int clip_gamut
Definition: vf_colorspace.c:135
ColorSpaceContext::in_y_rng
int in_y_rng
Definition: vf_colorspace.c:161
ColorSpaceContext::yuv2rgb_dbl_coeffs
double yuv2rgb_dbl_coeffs[3][3]
Definition: vf_colorspace.c:160
query_formats
static int query_formats(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out)
Definition: vf_colorspace.c:855
csp.h
AVCOL_TRC_EXT_NB
@ AVCOL_TRC_EXT_NB
Not part of ABI.
Definition: pixfmt.h:693
AVFilterFormatsConfig::color_ranges
AVFilterFormats * color_ranges
AVColorRange.
Definition: avfilter.h:142
av_frame_side_data_remove_by_props
void av_frame_side_data_remove_by_props(AVFrameSideData ***sd, int *nb_sd, int props)
Remove and free all side data instances that match any of the given side data properties.
Definition: side_data.c:117
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:668
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a fil...
Definition: avfilter.h:197
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:708
convert
static int convert(AVFilterContext *ctx, void *data, int job_nr, int n_jobs)
Definition: vf_colorspace.c:314
yuv2yuv
static void fn() yuv2yuv(uint8_t *_dst[3], const ptrdiff_t dst_stride[3], uint8_t *_src[3], const ptrdiff_t src_stride[3], int w, int h, const int16_t c[3][3][8], const int16_t yuv_offset[2][8])
Definition: colorspacedsp_yuv2yuv_template.c:40
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
ff_fill_rgb2yuv_table
void ff_fill_rgb2yuv_table(const AVLumaCoefficients *coeffs, double rgb2yuv[3][3])
Definition: colorspace.c:125
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:711
ColorSpaceContext::dither_scratch
int * dither_scratch[3][2]
Definition: vf_colorspace.c:140
ColorSpaceContext::in_primaries
const AVColorPrimariesDesc * in_primaries
Definition: vf_colorspace.c:142
AVCOL_PRI_V_GAMUT
@ AVCOL_PRI_V_GAMUT
Definition: pixfmt.h:658
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:700
CS_BT601_6_625
@ CS_BT601_6_625
Definition: vf_colorspace.c:53
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:845
ColorSpaceContext::in_lumacoef
const AVLumaCoefficients * in_lumacoef
Definition: vf_colorspace.c:150
ThreadData
Used for passing data between threads.
Definition: dsddec.c:71
FILTER_QUERY_FUNC2
#define FILTER_QUERY_FUNC2(func)
Definition: filters.h:241
ColorSpaceContext::out_primaries
const AVColorPrimariesDesc * out_primaries
Definition: vf_colorspace.c:142
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
DitherMode
DitherMode
Definition: vf_colorspace.c:42
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:46
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:703
FLAGS
#define FLAGS
Definition: vf_colorspace.c:917
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:760
AV_SIDE_DATA_PROP_COLOR_DEPENDENT
@ AV_SIDE_DATA_PROP_COLOR_DEPENDENT
Side data depends on the video color space.
Definition: frame.h:316
ColorSpaceContext::dsp
ColorSpaceDSPContext dsp
Definition: vf_colorspace.c:121
NB_WP_ADAPT_NON_IDENTITY
@ NB_WP_ADAPT_NON_IDENTITY
Definition: vf_colorspace.c:64
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:641
ClipGamutMode
ClipGamutMode
Definition: vf_colorspace.c:69
pixfmt.h
outputs
static const AVFilterPad outputs[]
Definition: vf_colorspace.c:1056
av_malloc
void * av_malloc(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:98
ColorSpaceContext::out_lumacoef
const AVLumaCoefficients * out_lumacoef
Definition: vf_colorspace.c:150
AVCIExy::y
AVRational y
Definition: csp.h:57
ff_fill_rgb2xyz_table
void ff_fill_rgb2xyz_table(const AVPrimaryCoefficients *coeffs, const AVWhitepointCoefficients *wp, double rgb2xyz[3][3])
Definition: colorspace.c:79
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:543
CS_BT470M
@ CS_BT470M
Definition: vf_colorspace.c:50
yuv2rgb_fn
void(* yuv2rgb_fn)(int16_t *rgb[3], ptrdiff_t rgb_stride, uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int w, int h, const int16_t yuv2rgb_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:27
ColorSpaceContext::user_icsp
enum AVColorSpace in_csp out_csp user_csp user_icsp
Definition: vf_colorspace.c:124
AVFrame::height
int height
Definition: frame.h:499
AVCOL_PRI_EXT_NB
@ AVCOL_PRI_EXT_NB
Not part of ABI.
Definition: pixfmt.h:659
default_csp
static enum AVColorSpace default_csp[CS_NB+1]
Definition: vf_colorspace.c:101
ff_filter_execute
int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: avfilter.c:1691
default_prm
static enum AVColorPrimaries default_prm[CS_NB+1]
Definition: vf_colorspace.c:88
ff_vf_colorspace
const FFFilter ff_vf_colorspace
Definition: vf_colorspace.c:1064
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AVCOL_SPC_FCC
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:705
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
avfilter.h
colorspacedsp.h
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
rgb2yuv_fn
void(* rgb2yuv_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8])
Definition: colorspacedsp.h:31
ColorSpaceContext::out_uv_rng
int out_uv_rng
Definition: vf_colorspace.c:161
AVCOL_TRC_SMPTE170M
@ AVCOL_TRC_SMPTE170M
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
Definition: pixfmt.h:673
inputs
static const AVFilterPad inputs[]
Definition: vf_colorspace.c:1048
ThreadData::in_ss_h
int in_ss_h
Definition: vf_colorspace.c:311
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AVFilterContext
An instance of a filter.
Definition: avfilter.h:274
ColorSpaceContext::did_warn_range
int did_warn_range
Definition: vf_colorspace.c:163
WhitepointAdaptation
WhitepointAdaptation
Definition: vf_colorspace.c:61
ColorSpaceContext::user_iprm
enum AVColorPrimaries in_prm out_prm user_prm user_iprm
Definition: vf_colorspace.c:127
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:167
desc
const char * desc
Definition: libsvtav1.c:82
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
FFFilter::p
AVFilter p
The public AVFilter.
Definition: filters.h:271
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
ENUM
#define ENUM(x, y, z)
Definition: vf_colorspace.c:918
mem.h
rgb2yuv_fsb_fn
void(* rgb2yuv_fsb_fn)(uint8_t *yuv[3], const ptrdiff_t yuv_stride[3], int16_t *rgb[3], ptrdiff_t rgb_stride, int w, int h, const int16_t rgb2yuv_coeffs[3][3][8], const int16_t yuv_offset[8], int *rnd[3][2])
Definition: colorspacedsp.h:35
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
w
uint8_t w
Definition: llvidencdsp.c:39
AVCOL_PRI_SMPTE432
@ AVCOL_PRI_SMPTE432
SMPTE ST 432-1 (2010) / P3 D65 / Display P3.
Definition: pixfmt.h:651
AVCOL_TRC_V_LOG
@ AVCOL_TRC_V_LOG
Definition: pixfmt.h:692
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
DITHER_NONE
@ DITHER_NONE
Definition: vf_colorspace.c:43
TransferCharacteristics::beta
double beta
Definition: vf_colorspace.c:115
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ma
#define ma
Definition: vf_colormatrix.c:98
TransferCharacteristics::delta
double delta
Definition: vf_colorspace.c:115
h
h
Definition: vp9dsp_template.c:2070
stride
#define stride
Definition: h264pred_template.c:536
supported_format
#define supported_format(d)
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:702
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:742
create_filtergraph
static int create_filtergraph(AVFilterContext *ctx, const AVFrame *in, const AVFrame *out)
Definition: vf_colorspace.c:414
ThreadData::in_linesize
ptrdiff_t in_linesize[3]
Definition: vf_colorspace.c:310
yuv2rgb
static void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
Definition: g2meet.c:263
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3823
ff_colorspacedsp_init
void ff_colorspacedsp_init(ColorSpaceDSPContext *dsp)
Definition: colorspacedsp.c:102
AVPixFmtDescriptor::log2_chroma_h
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
ColorSpaceContext::rgb_stride
ptrdiff_t rgb_stride
Definition: vf_colorspace.c:138
apply_lut
static void apply_lut(int16_t *buf[3], ptrdiff_t stride, int w, int h, const int16_t *lut)
Definition: vf_colorspace.c:291
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:3376